prompt | completion | api
---|---|---|
#!/usr/bin/env python3.6
# <NAME>
import re
import sys
import requests
import unidecode
import pandas as pd
from bs4 import BeautifulSoup
from difflib import SequenceMatcher
from bs4.element import Comment
import urllib.request
#########################################
##### START CUSTOMIZABLE PARAMETERS #####
Season = 2019
Round = 36
list_of_match_no = [0,1,2,3,4,5,6,7,8,9] # indices within a round go from 0 to 9 (10 matches in total)
#list_of_TeamHome = ['Betis','Getafe','Villarreal','Barcelona','Eibar','Valladolid','Osasuna','Alavés','Real Madrid']
#list_of_TeamAway = ['Granada','Espanyol','Mallorca','Leganés','Athletic Club','Celta de Vigo','Atlético de Madrid','Real Sociedad','Valencia']
#list_of_TeamHome = ['Athletic Club','Atlético de Madrid','Celta de Vigo','Espanyol','Getafe','Granada','Mallorca','Real Sociedad','Sevilla','Valencia']
#list_of_TeamAway = ['Betis','Valladolid','Alavés','Levante','Eibar','Villarreal','Leganés','Real Madrid','Barcelona','Osasuna']
#list_of_TeamHome = ['Barcelona','Alavés','Eibar','Leganés','Levante','Betis','Real Madrid','Real Sociedad','Valladolid','Villarreal']
#list_of_TeamAway = ['Athletic Club','Osasuna','Valencia','Granada','Atlético de Madrid','Espanyol','Mallorca','Celta de Vigo','Getafe','Sevilla']
#list_of_TeamHome = ['Athletic Club','Atlético de Madrid','Celta de Vigo','Espanyol','Getafe','Granada','Levante','Osasuna','Sevilla','Villarreal']
#list_of_TeamAway = ['Mallorca','Alavés','Barcelona','Real Madrid','Real Sociedad','Eibar','Betis','Leganés','Valladolid','Valencia']
#list_of_TeamHome = ['Barcelona','Alavés','Eibar','Leganés','Mallorca','Betis','Real Madrid','Real Sociedad','Valladolid','Valencia']
#list_of_TeamAway = ['Atlético de Madrid','Granada','Osasuna','Sevilla','Celta de Vigo','Villarreal','Getafe','Espanyol','Levante','Athletic Club']
#list_of_TeamHome = ['Athletic Club','Atlético de Madrid','Celta de Vigo','Espanyol','Granada','Levante','Osasuna','Valladolid','Sevilla','Villarreal']
#list_of_TeamAway = ['Real Madrid','Mallorca','Betis','Leganés','Valencia','Real Sociedad','Getafe','Alavés','Eibar','Barcelona']
#list_of_TeamHome = ['Athletic Club','Barcelona','Celta de Vigo','Eibar','Getafe','Mallorca','Betis','Real Madrid','Real Sociedad','Valencia']
#list_of_TeamAway = ['Sevilla','Espanyol','Atlético de Madrid','Leganés','Villarreal','Levante','Osasuna','Alavés','Granada','Valladolid']
list_of_TeamHome = ['Atlético de Madrid','Alavés','Espanyol','Granada','Leganés','Levante','Osasuna','Valladolid','Sevilla','Villarreal']
list_of_TeamAway = ['Betis','Getafe','Eibar','Real Madrid','Valencia','Athletic Club','Celta de Vigo','Barcelona','Mallorca','Real Sociedad']
csv_file_name = 'test_dataframe.csv'
base_url = 'https://www.comuniate.com/alineaciones'
base_url_sofifa_search = 'https://sofifa.com/players?keyword='
###### END CUSTOMIZABLE PARAMETERS ######
#########################################
def main():
data = []
for i in range(len(list_of_match_no)):
# Transform team names to sofifa format
match_no = list_of_match_no[i]
TeamHome = list_of_TeamHome[i]
TeamAway = list_of_TeamAway[i]
sofifa_TeamHome = TeamHome
sofifa_TeamAway = TeamAway
if TeamHome == 'Betis': sofifa_TeamHome = 'Real Betis'
if TeamAway == 'Betis': sofifa_TeamAway = 'Real Betis'
if TeamHome == 'Granada': sofifa_TeamHome = 'Granada CF'
if TeamAway == 'Granada': sofifa_TeamAway = 'Granada CF'
if TeamHome == 'Levante': sofifa_TeamHome = 'Levante UD'
if TeamAway == 'Levante': sofifa_TeamAway = 'Levante UD'
if TeamHome == 'Sevilla': sofifa_TeamHome = 'Sevilla FC'
if TeamAway == 'Sevilla': sofifa_TeamAway = 'Sevilla FC'
if TeamHome == 'Getafe': sofifa_TeamHome = 'Getafe CF'
if TeamAway == 'Getafe': sofifa_TeamAway = 'Getafe CF'
if TeamHome == 'Espanyol': sofifa_TeamHome = 'RCD Espanyol'
if TeamAway == 'Espanyol': sofifa_TeamAway = 'RCD Espanyol'
if TeamHome == 'Villarreal': sofifa_TeamHome = 'Villarreal CF'
if TeamAway == 'Villarreal': sofifa_TeamAway = 'Villarreal CF'
if TeamHome == 'Mallorca': sofifa_TeamHome = 'RCD Mallorca'
if TeamAway == 'Mallorca': sofifa_TeamAway = 'RCD Mallorca'
if TeamHome == 'Barcelona': sofifa_TeamHome = 'FC Barcelona'
if TeamAway == 'Barcelona': sofifa_TeamAway = 'FC Barcelona'
if TeamHome == 'Leganés': sofifa_TeamHome = 'CD Leganés'
if TeamAway == 'Leganés': sofifa_TeamAway = 'CD Leganés'
if TeamHome == 'Eibar': sofifa_TeamHome = 'SD Eibar'
if TeamAway == 'Eibar': sofifa_TeamAway = 'SD Eibar'
if TeamHome == 'Athletic Club': sofifa_TeamHome = 'Athletic Club de Bilbao'
if TeamAway == 'Athletic Club': sofifa_TeamAway = 'Athletic Club de Bilbao'
if TeamHome == 'Valladolid': sofifa_TeamHome = 'Real Valladolid CF'
if TeamAway == 'Valladolid': sofifa_TeamAway = 'Real Valladolid CF'
if TeamHome == 'Celta de Vigo': sofifa_TeamHome = 'RC Celta'
if TeamAway == 'Celta de Vigo': sofifa_TeamAway = 'RC Celta'
if TeamHome == 'Osasuna': sofifa_TeamHome = 'CA Osasuna'
if TeamAway == 'Osasuna': sofifa_TeamAway = 'CA Osasuna'
if TeamHome == 'Atlético de Madrid': sofifa_TeamHome = 'Atlético Madrid'
if TeamAway == 'Atlético de Madrid': sofifa_TeamAway = 'Atlético Madrid'
if TeamHome == 'Alavés': sofifa_TeamHome = 'Deportivo Alavés'
if TeamAway == 'Alavés': sofifa_TeamAway = 'Deportivo Alavés'
if TeamHome == 'Valencia': sofifa_TeamHome = 'Valencia CF'
if TeamAway == 'Valencia': sofifa_TeamAway = 'Valencia CF'
url = base_url + "/" + str(match_no) + "/" + str(Round) + "/"
print('#######################')
print('New Match:', TeamHome, '-', TeamAway)
print(url)
html = urllib.request.urlopen(url).read()
final_text = text_from_html(html)
#print('TEST final text:')
#print(final_text)
long_date = re.findall(r'Fecha del partido:(.*?) ', final_text)
if long_date:
long_date = long_date[0].strip().split('-')
Date = long_date[0] + "/2020"
Time = long_date[1]
else:
Date = ''
Time = ''
r = re.findall(r'Posible Alineación(.*?)Jugadores lesionados', final_text)
Referee=""
Stadium=""
Result = '0-0'
counter=0
playershome=[]
PlayersHome=[]
ratingshome=[]
RatingHome=[]
potentialshome=[]
PotentialHome=[]
playersaway=[]
PlayersAway=[]
ratingsaway=[]
RatingAway=[]
potentialsaway=[]
PotentialAway=[]
for text in r:
words = text.split(' ')
#print('TEST words:', words)
for w in words:
w = w.strip()
if w and w != '' and not w.isdecimal():
new_w = re.sub("( [0-9]+|J[0-9]+)", " ", w).strip()
new_w = unidecode.unidecode(new_w)
if counter != 0 and counter != 12:
if new_w == '<NAME>': new_w = '<NAME>'
if new_w == '<NAME>': new_w = 'Emerson'
if new_w == '<NAME>': new_w = 'Puertas'
if new_w == '<NAME>': new_w = 'Eteki'
if new_w == '<NAME>': new_w = '<NAME>'
if new_w == 'Cucho Hernandez': new_w = 'Hernandez'
if new_w == '<NAME>': new_w = '<NAME>'
if new_w == '<NAME>': new_w = 'Cote'
if new_w == '<NAME>': new_w = 'Emeterio'
if new_w == '<NAME>': new_w = 'Nacho'
if new_w == '<NAME>.': new_w = '<NAME>'
if new_w == '<NAME>': new_w = 'Gimenez'
if new_w == '<NAME>': new_w = 'Valverde'
if new_w == '<NAME>': new_w = 'Gomez'
if new_w == '<NAME>': new_w = 'Vinicius jr'
if new_w == '<NAME>': new_w = 'Herrero'
if new_w == '<NAME>': new_w = 'Carrasco'
if new_w == '<NAME>': new_w = 'jacobo&r=200024&set=true'
if new_w == '<NAME>': new_w = 'Pier'
if new_w == 'Abdallahi': new_w = '<NAME>'
if new_w == 'Bono': new_w = 'Bounou'
medium_name = new_w
#print('TEST name:',medium_name)
player_sofifa_id, final_team, player_rating, player_potential = get_id(medium_name)
counter2=0
for team in final_team:
if counter > 0 and counter < 12 and team[0] == sofifa_TeamHome:
print('Player:', medium_name, '-- Sofifa info -- ID:',player_sofifa_id[counter2],'. Team:',final_team[counter2][0],'. Rating:',player_rating[counter2],'. Potential:',player_potential[counter2])
playershome.append(medium_name)
ratingshome.append(int(player_rating[counter2]))
potentialshome.append(int(player_potential[counter2]))
elif counter > 12 and counter < 24 and team[0] == sofifa_TeamAway:
print('Player:', medium_name, '-- Sofifa info -- ID:',player_sofifa_id[counter2],'. Team:',final_team[counter2][0],'. Rating:',player_rating[counter2],'. Potential:',player_potential[counter2])
playersaway.append(medium_name)
ratingsaway.append(int(player_rating[counter2]))
potentialsaway.append(int(player_potential[counter2]))
counter2 = counter2+1
counter=counter+1
print('#######################')
PlayersHome.append(playershome)
PlayersAway.append(playersaway)
RatingHome.append(ratingshome)
RatingAway.append(ratingsaway)
PotentialHome.append(potentialshome)
PotentialAway.append(potentialsaway)
# Transform to data frame
data_row = [Season,Round,Date,Time,TeamHome,Result,TeamAway,Referee,Stadium,PlayersHome,RatingHome,PotentialHome,PlayersAway,RatingAway,PotentialAway]
data.append(data_row)
df = | pd.DataFrame(data,columns=['Season','Round','Date','Time','TeamHome','Result','TeamAway','Referee','Stadium','PlayersHome','RatingHome','PotentialHome','PlayersAway','RatingAway','PotentialAway'])
#print(df) | pandas.DataFrame |
#!/usr/bin/env python
from __future__ import print_function
from .tabulate import tabulate as tabulate_
import sys
import pandas as pd
import re
import datetime
def _get_version():
import ph._version
return ph._version.__version__
def print_version():
print(_get_version())
# Command line parsing of (1) --abc and (2) --abc=def
KWARG = re.compile("^--[a-z0-9_-]+$")
KWARG_WITH_VALUE = re.compile("^--[a-z0-9_-]+=")
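# Hedged sketch (added for illustration; not part of the original ph source):
# one way the two patterns above could be used to classify command-line
# tokens. The helper name and return format are made up for this example.
def _classify_token(token):
    if KWARG_WITH_VALUE.match(token):
        key, _, value = token.partition("=")
        return "kwarg", key.lstrip("-"), value
    if KWARG.match(token):
        return "flag", token.lstrip("-"), True
    return "positional", token, None
# e.g. _classify_token("--header")  -> ("flag", "header", True)
#      _classify_token("--sep=;")   -> ("kwarg", "sep", ";")
#      _classify_token("describe")  -> ("positional", "describe", None)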
USAGE_TEXT = """
ph is a command line tool for streaming csv data.
If you have a csv file `a.csv`, you can pipe it through `ph` on the
command line by using
$ cat a.csv | ph columns x y | ph eval "z = x**2 - y" | ph show
Use ph help [command] for help on the individual commands.
A list of available commands follows.
"""
COMMANDS = {}
DOCS = {}
def _gpx(fname):
try:
import gpxpy
except ImportError:
sys.exit("ph gpx needs gpxpy, pip install ph[gpx]")
def from_trackpoint(tp=None):
if tp is None:
return "time", "latitude", "longitude", "elevation", "distance"
p = tp.point
return str(p.time), p.latitude, p.longitude, p.elevation, tp.distance_from_start
with open(fname, "r") as fin:
gpx = gpxpy.parse(fin)
data = gpx.get_points_data()
columns = from_trackpoint()
dfdata = [from_trackpoint(tp) for tp in data]
return pd.DataFrame(dfdata, columns=columns)
def _tsv(*args, **kwargs):
kwargs["sep"] = "\t"
return pd.read_csv(*args, **kwargs)
# Optional readers are registered defensively below: pd.read_* attributes that
# only exist in newer pandas versions raise AttributeError and are skipped.
READERS = {
"csv": pd.read_csv,
"clipboard": pd.read_clipboard,
"fwf": pd.read_fwf,
"json": pd.read_json,
"html": pd.read_html,
"tsv": _tsv,
"gpx": _gpx,
}
try:
READERS["excel"] = pd.read_excel
READERS["xls"] = pd.read_excel
READERS["odf"] = pd.read_excel
except AttributeError:
pass
try:
READERS["hdf5"] = pd.read_hdf
except AttributeError:
pass
try:
READERS["feather"] = pd.read_feather
except AttributeError:
pass
try:
READERS["parquet"] = pd.read_parquet
except AttributeError:
pass
try:
READERS["orc"] = pd.read_orc
except AttributeError:
pass
try:
READERS["msgpack"] = pd.read_msgpack
except AttributeError:
pass
try:
READERS["stata"] = pd.read_stata
except AttributeError:
pass
try:
READERS["sas"] = pd.read_sas
except AttributeError:
pass
try:
READERS["spss"] = pd.read_spss
except AttributeError:
pass
try:
READERS["pickle"] = pd.read_pickle
except AttributeError:
pass
try:
READERS["gbq"] = pd.read_gbq
except AttributeError:
pass
try:
READERS["google"] = pd.read_gbq
except AttributeError:
pass
try:
READERS["bigquery"] = pd.read_gb
except AttributeError:
pass
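# Hedged sketch (added; not part of the original module): with the table above,
# reading a file by format name is presumably a plain dictionary lookup.
# `_read_any`, `ftype` and `fname` are illustrative names, not ph's real API.
def _read_any(ftype, fname, **kwargs):
    try:
        reader = READERS[ftype]
    except KeyError:
        sys.exit("ph: unknown or unavailable input format {}".format(ftype))
    return reader(fname, **kwargs)
# e.g. _read_any("csv", "a.csv") goes through pd.read_csv, while
# _read_any("tsv", "a.tsv") uses the _tsv wrapper defined above.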
WRITERS = {
"csv": "to_csv",
"fwf": "to_fwf",
"json": "to_json",
"html": "to_html",
"clipboard": "to_clipboard",
"xls": "to_excel",
"odf": "to_excel",
"hdf5": "to_hdf",
"feather": "to_feather",
"parquet": "to_parquet",
"orc": "to_orc",
"msgpack": "to_msgpack",
"stata": "to_stata",
"sas": "to_sas",
"spss": "to_spss",
"pickle": "to_pickle",
"gbq": "to_gbq",
"google": "to_gbq",
"bigquery": "to_gbq",
# extras
"tsv": "to_csv",
}
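# Hedged sketch (added; not from the original source): WRITERS stores method
# *names*, which suggests dispatch via getattr on the DataFrame. The helper
# below is illustrative only; the tsv case mirrors the _tsv reader above.
def _write_any(df, ftype, fname=None, **kwargs):
    try:
        method = getattr(df, WRITERS[ftype])
    except (KeyError, AttributeError):
        sys.exit("ph: unknown or unavailable output format {}".format(ftype))
    if ftype == "tsv":
        kwargs.setdefault("sep", "\t")
    return method(fname, **kwargs) if fname is not None else method(**kwargs)
# e.g. _write_any(df, "csv", "out.csv", index=False)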
FALSY = ("False", "false", "No", "no", "0", False, 0, "None")
TRUTHY = ("True", "true", "Yes", "yes", "1", True, 1)
def _assert_col(df, col, caller=None):
if col not in df.columns:
if caller is not None:
sys.exit("ph {}: Unknown column {}".format(caller, col))
sys.exit("Unknown column {}".format(col))
def _assert_cols(df, cols, caller=None):
for col in cols:
_assert_col(df, col, caller=caller)
def register(fn, name=None):
if name is None:
name = fn.__name__
COMMANDS[name] = fn
DOCS[name] = fn.__doc__
return fn
def registerx(name):
def inner(fn):
register(fn, name)
return fn
return inner
@register
def dataset(dset=None):
"""Load dataset as csv.
Usage: ph dataset linnerud | ph describe
"""
try:
import sklearn.datasets
except ImportError:
sys.exit("You need scikit-learn. Install ph[data].")
REALDATA = {
"olivetti_faces": sklearn.datasets.fetch_olivetti_faces,
"20newsgroups": sklearn.datasets.fetch_20newsgroups,
"20newsgroups_vectorized": sklearn.datasets.fetch_20newsgroups_vectorized,
"lfw_people": sklearn.datasets.fetch_lfw_people,
"lfw_pairs": sklearn.datasets.fetch_lfw_pairs,
"covtype": sklearn.datasets.fetch_covtype,
"rcv1": sklearn.datasets.fetch_rcv1,
"kddcup99": sklearn.datasets.fetch_kddcup99,
"california_housing": sklearn.datasets.fetch_california_housing,
}
TOYDATA = {
"boston": sklearn.datasets.load_boston,
"iris": sklearn.datasets.load_iris,
"diabetes": sklearn.datasets.load_diabetes,
"digits": sklearn.datasets.load_digits,
"linnerud": sklearn.datasets.load_linnerud,
"wine": sklearn.datasets.load_wine,
"breast_cancer": sklearn.datasets.load_breast_cancer,
}
if dset is None:
print("type,name")
print("\n".join("{},{}".format("real", k) for k in REALDATA))
print("\n".join("{},{}".format("toy", k) for k in TOYDATA))
sys.exit()
if dset not in TOYDATA.keys() | REALDATA.keys():
sys.exit("Unknown dataset {}. See ph help dataset.".format(dset))
if dset in TOYDATA:
data = TOYDATA[dset]()
else:
data = REALDATA[dset]()
try:
df = | pd.DataFrame(data.data, columns=data.feature_names) | pandas.DataFrame |
#dependencies
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import cross_validate
import pandas as pd
import numpy as np
from scipy.signal import savgol_filter
from sklearn.base import TransformerMixin, RegressorMixin, BaseEstimator
from scipy import sparse, signal
from BaselineRemoval import BaselineRemoval
from sklearn.model_selection import ShuffleSplit
from scipy.sparse.linalg import spsolve
#Import prep methods
import sklearn
from sklearn.preprocessing import StandardScaler, MinMaxScaler,MaxAbsScaler, RobustScaler
from sklearn.preprocessing import FunctionTransformer, PowerTransformer, QuantileTransformer
from sklearn.decomposition import PCA, KernelPCA
class SavgolFilter(BaseEstimator,TransformerMixin):
def __init__(self,window_length=5,polyorder=2,axis=1):
self.__name__='SavgolFilter'
self.window_length=window_length
self.polyorder=polyorder
self.axis=axis
self.output=None
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
self.output=savgol_filter(X,window_length=self.window_length,polyorder=self.polyorder,axis=self.axis)
return self.output
def fit_transform(self,X,y=None):
self.output=savgol_filter(X,window_length=self.window_length,polyorder=self.polyorder,axis=self.axis)
return self.output
class BaselineASLS(BaseEstimator,TransformerMixin):
#Asymmetric Least Squares
def __init__(self, lam=1e5, p=1e-3, niter=10):
self.__name__='BaselineAsLS'
self.lam=lam
self.p=p
self.niter=niter
self.y=None
self.output=None
def fit(self,X,y=None):
self.y=y
def transform(self,X,y=None):
y=self.y
self.output=np.apply_along_axis(lambda x: self.line_remove(x), 0, X)
return self.output
def line_remove(self,f):
L = len(f)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
z = 0
for i in range(self.niter):
W = sparse.spdiags(w, 0, L, L)
Z=W + self.lam * D.dot(D.transpose())
z = spsolve(Z, w * f)
w = self.p * (f > z) + (1 - self.p) * (f < z)
return z
def fit_transform(self,X,y=None):
self.y=y
return self.transform(X,y)
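# Hedged usage sketch (added for illustration; not part of the original file).
# transform() above applies line_remove() column-wise, and line_remove()
# returns the fitted asymmetric-least-squares baseline z, so the corrected
# signal is the input minus the transformer output. The synthetic data below
# is made up purely to show the call pattern.
def _example_asls_baseline():
    x = np.linspace(0, 10, 500)
    peak = np.exp(-0.5 * ((x - 5.0) / 0.1) ** 2)   # narrow synthetic peak
    drift = 0.05 * x ** 2                          # slowly varying baseline
    spectrum = (peak + drift).reshape(-1, 1)       # one column per signal
    baseline_est = BaselineASLS(lam=1e5, p=1e-3).fit_transform(spectrum)
    return spectrum - baseline_est                 # baseline-corrected signal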
class BaselineModpoly(BaseEstimator,TransformerMixin):
def __init__(self, degree=2):
self.__name__='BaselineModPoly'
self.degree=degree
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ModPoly(self.degree)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ModPoly(self.degree)
del MP
return X_
class BaselineZhangFit(BaseEstimator,TransformerMixin):
def __init__(self, itermax=50):
self.__name__='BaselineZhangFit'
self.itermax=itermax
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ZhangFit(itermax=self.itermax)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.ZhangFit(itermax=self.itermax)
del MP
return X_
class BaselineIModPoly(BaseEstimator,TransformerMixin):
def __init__(self, degree=2):
self.__name__='BaselineImprovedModPoly'
self.degree=degree
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.IModPoly(self.degree)
del MP
return X_
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
X_=np.zeros_like(X)
for i in range(X.shape[0]):
MP=BaselineRemoval(X[i,:])
X_[i,:]=MP.IModPoly(self.degree)
del MP
return X_
class BaselineLinear(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='BaselineLinear'
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
return signal.detrend(X)
def fit_transform(self,X,y=None):
try:
X=X.to_numpy()
except:
pass
return signal.detrend(X)
class BaselineSecondOrder(BaseEstimator,TransformerMixin):
def __init__(self,degree=2):
self.__name__='BaselineSecondOrder'
self.degree=degree
def fit(self,X,y=None):
pass
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
t=np.arange(0,X.shape[1])
X_s= X.apply(lambda x: x- np.polyval(np.polyfit(t,x,self.degree), t),axis=1)
return X_s
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
t=np.arange(0,X.shape[1])
X_s= X.apply(lambda x: x- np.polyval(np.polyfit(t,x,self.degree), t),axis=1)
return X_s
class MSC(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MSC'
self.mean=None
def fit(self,X,y=None):
self.mean= np.array(X.mean(axis=0))
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
#self.mean= np.array(X.mean(axis=0))
def transformMSC(x,mean):
m,b= np.polyfit(mean,x,1)
return (x-b)*m
return X.apply(transformMSC,args=(self.mean,),axis=1).values
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.mean= np.array(X.mean(axis=0))
def transformMSC(x,mean):
m,b= np.polyfit(mean,x,1)
return (x-b)*m
return X.apply(transformMSC,args=(self.mean,),axis=1).values
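# Hedged usage sketch (added; not in the original source): MSC regresses each
# spectrum (row) against the mean spectrum via np.polyfit and rescales it with
# the fitted slope and intercept. The demo array is made up for illustration.
def _example_msc():
    rng = np.random.default_rng(0)
    demo_spectra = rng.random((10, 200)) + np.linspace(0, 1, 200)
    corrected = MSC().fit_transform(pd.DataFrame(demo_spectra))
    return corrected  # numpy array of shape (10, 200)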
class FirstDerivative(BaseEstimator,TransformerMixin):
def __init__(self,d=2):
self.__name__='First Derivative'
self.d=d
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
# TO DO:
#Piecewise MSC (PMSC)
#Extended MSC (2nd order), Inverse MSC, EIMSC
#Weighted MSC, Loopy MSC (LMSC)
#Norris-Williams
#WhittakerSmooth
class SecondDerivative(BaseEstimator,TransformerMixin):
def __init__(self,d=2):
self.__name__='Second Derivative'
self.d=d
def fit(self,X,y=None):
pass
def transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
X_=X_.diff(self.d,axis=1) #second dev
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
X_=X.diff(self.d,axis=1)
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
X_=X_.diff(self.d,axis=1) #second dev
drop= list(X_.columns)[0:2]
X_.drop(columns=drop,inplace=True)
return X_
class SNV(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='SNV'
self.mean=None
self.std=None
def fit(self,X):
try:
X=pd.DataFrame(X)
except:
pass
self.mean=X.mean(axis=0)
self.std=X.std(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
X=X.T
R=(X.subtract(self.mean,axis=0)).divide(self.std+np.finfo(float).eps,axis=0)
return R.T
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.fit(X)
return self.transform(X)
class RNV(BaseEstimator,TransformerMixin):
def __init__(self,q=0.1):
self.__name__='RNV'
self.q=q
self.quantile=None
self.std=None
def fit(self,X):
try:
X=pd.DataFrame(X)
except:
pass
X=X.T
self.quantile=X.quantile(q=self.q,axis=1)
self.std=X.quantile(q=self.q,axis=1).std()
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
X=X.T
R=(X.subtract(self.quantile,axis=0))/(self.std+np.finfo(float).eps)
return R.T
def fit_transform(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.fit(X)
return self.transform(X)
class MeanScaling(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MeanScaling'
self.mean=0
def fit(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.mean=X.mean(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
return pd.DataFrame(np.divide(np.asarray(X),np.asarray(self.mean)))
def fit_transform(self,X,y=None):
self.fit(X)
return self.transform(X)
class MedianScaling(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MedianScaling'
self.median=0
def fit(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.median=X.median(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
return pd.DataFrame(np.divide(np.asarray(X),np.asarray(self.median)))
def fit_transform(self,X,y=None):
self.fit(X)
return self.transform(X)
class MaxScaling(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MaxScaling'
self.max=0
def fit(self,X,y=None):
try:
X=pd.DataFrame(X)
except:
pass
self.max=X.max(axis=0)
def transform(self,X, y=None):
try:
X=pd.DataFrame(X)
except:
pass
return pd.DataFrame(np.divide(np.asarray(X),np.asarray(self.max)))
def fit_transform(self,X,y=None):
self.fit(X)
return self.transform(X)
class MeanCentering(BaseEstimator,TransformerMixin):
def __init__(self):
self.__name__='MeanCentering'
self.mean=0
def fit(self,X,y=None):
try:
X= | pd.DataFrame(X) | pandas.DataFrame |
from datetime import datetime, timedelta
from pandas import json
from api.decorators import api_post, api_get
from api.helper import json_response, json_error_response
from api.utils import int_or_none
from broker.models import BrokerVehicle, Broker
from fms.decorators import authenticated_user
from fms.views import get_or_none
from owner.models import Vehicle
from supplier.helper import compare_format
from team.models import ManualBooking
from team.helper.helper import to_int
from transaction.models import VehicleAllocated, Transaction
from django.contrib.auth.models import User
import pandas as pd
from owner.vehicle_util import display_format
@api_post
@authenticated_user
def booking_history_data(request):
broker = Broker.objects.get(name=User.objects.get(username=request.user.username))
broker_vehicle_ids = BrokerVehicle.objects.filter(broker=broker).values_list('vehicle_id', flat=True)
allocated_vehicles_data = VehicleAllocated.objects.filter(vehicle_number_id__in=broker_vehicle_ids).values(
'transaction_id', 'total_out_ward_amount', 'total_amount_to_owner', 'transaction__shipment_datetime', 'id',
'source_city', 'destination_city', 'transaction_id', 'material', 'transaction__total_vehicle_requested',
'transaction__transaction_status', 'transaction__transaction_id', 'vehicle_number__vehicle_number', 'lr_number')
transaction_data = [{'id': v['id'],
'transaction_id': v['transaction__transaction_id'],
'status': v['transaction__transaction_status'],
'source_city': v['source_city'],
'destination_city': v['destination_city'],
'paid': str(int(v['total_out_ward_amount'])),
'amount': str(int(v['total_amount_to_owner'])),
'balance': str(int(v['total_amount_to_owner'] - v['total_out_ward_amount'])),
'total_vehicle_requested': v['transaction__total_vehicle_requested'],
'vehicle_number': display_format(v['vehicle_number__vehicle_number']),
'lr_number': v['lr_number'],
'shipment_date': v['transaction__shipment_datetime'].strftime('%d-%b-%Y')} for v in
allocated_vehicles_data]
return json_response({'status': 'success', 'data': transaction_data})
@api_post
@authenticated_user
def vehicle_trip_data(request):
data = request.data
vehicle_id = int_or_none(data.get('vehicleId', None))
if vehicle_id:
vehicle = get_or_none(Vehicle, id=vehicle_id)
if not vehicle:
return json_error_response('Vehicle with id=%s does not exist' % vehicle_id, 404)
else:
broker_vehicle_ids = BrokerVehicle.objects.filter(vehicle=vehicle).values_list(
'vehicle_id',
flat=True)
allocated_vehicles_data = VehicleAllocated.objects.filter(vehicle_number_id__in=broker_vehicle_ids).values(
'transaction_id', 'total_out_ward_amount', 'total_amount_to_owner', 'transaction__shipment_datetime',
'source_city', 'destination_city', 'transaction_id', 'material', 'transaction__total_vehicle_requested',
'transaction__transaction_status', 'transaction__transaction_id', 'vehicle_number__vehicle_number',
'lr_number')
transaction_data = [{'id': v['transaction_id'],
'transaction_id': v['transaction__transaction_id'],
'status': v['transaction__transaction_status'],
'source_city': v['source_city'],
'destination_city': v['destination_city'],
'paid': str(int(v['total_out_ward_amount'])),
'amount': str(int(v['total_amount_to_owner'])),
'balance': str(int(v['total_amount_to_owner'] - v['total_out_ward_amount'])),
'total_vehicle_requested': v['transaction__total_vehicle_requested'],
'vehicle_number': display_format(v['vehicle_number__vehicle_number']),
'lr_number': v['lr_number'],
'shipment_date': v['transaction__shipment_datetime'].strftime('%d-%b-%Y')} for v in
allocated_vehicles_data]
return json_response({'status': 'success', 'data': transaction_data})
else:
vehicle = Vehicle()
@api_post
@authenticated_user
def mb_vehicle_trip_data(request):
data = request.data
vehicle_id = int_or_none(data.get('vehicleId', None))
if vehicle_id:
vehicle = int_or_none(get_or_none(Vehicle, id=vehicle_id))
if not vehicle:
return json_error_response('Vehicle with id=%s does not exist' % vehicle_id, 404)
else:
data = []
for booking in ManualBooking.objects.filter(
lorry_number__in=[display_format(compare_format(vehicle.vehicle_number))]).order_by(
'-shipment_date'):
if to_int(booking.total_amount_to_owner - booking.total_out_ward_amount) != 0:
data.append(
{
'status': 'unpaid',
'lr_number': '\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
'paid': to_int(booking.total_out_ward_amount),
'id': booking.id,
'total_vehicle_requested': None,
'vehicle_number': display_format(booking.lorry_number),
'source_city': booking.from_city,
'destination_city': booking.to_city,
'amount': to_int(booking.total_amount_to_owner),
'shipment_date': booking.shipment_date.strftime('%d-%b-%Y'),
'balance': to_int(booking.total_amount_to_owner - booking.total_out_ward_amount),
'transaction_id': booking.booking_id
}
)
else:
data.append(
{
'status': 'paid',
'lr_number': '\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
'paid': to_int(booking.total_out_ward_amount),
'id': booking.id,
'total_vehicle_requested': None,
'vehicle_number': display_format(booking.lorry_number),
'source_city': booking.from_city,
'destination_city': booking.to_city,
'amount': to_int(booking.total_amount_to_owner),
'shipment_date': booking.shipment_date.strftime('%d-%b-%Y'),
'balance': to_int(booking.total_amount_to_owner - booking.total_out_ward_amount),
'final_payment_date': final_payment_date(booking=booking),
'transaction_id': booking.booking_id
}
)
return json_response({'status': 'success', 'data': data})
def get_allocated_vehicle(request):
data = json.loads(request.body)
transaction = Transaction.objects.get(transaction_id=data['transaction_id'])
allocated_vehicle_list = []
for value in transaction.allocated_vehicle.all():
temp = []
temp.append(value.vehicle_number.vehicle_type.vehicle_type + ", " + value.vehicle_number.vehicle_type.capacity)
temp.append(value.vehicle_number.vehicle_number)
temp.append(value.vehicle_number.driver.driving_licence_number)
temp.append(value.vehicle_number.driver.name)
temp.append(value.vehicle_number.driver.phone)
allocated_vehicle_list.append(temp)
df_allocated = pd.DataFrame(allocated_vehicle_list,
columns=['vehicle_type', 'vehicle_number', 'driving_licence', 'driver_name',
'driver_phone'])
data_allocated = df_allocated.reset_index().to_json(orient='records')
data_allocated = json.loads(data_allocated)
return data_allocated
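# Hedged aside (added; not part of the original views): the DataFrame -> JSON
# round-trip above turns each row into a dict, with an extra "index" key added
# by reset_index(). The sample row below is made up to show the output shape.
def _example_records_roundtrip():
    import json as stdlib_json  # plain stdlib json, independent of pandas.json
    df = pd.DataFrame(
        [['32 ft, 7.5 ton', 'KA01AB1234', 'DL123', 'Some Driver', '9999999999']],
        columns=['vehicle_type', 'vehicle_number', 'driving_licence',
                 'driver_name', 'driver_phone'])
    payload = df.reset_index().to_json(orient='records')
    return stdlib_json.loads(payload)
    # -> [{'index': 0, 'vehicle_type': '32 ft, 7.5 ton', ...}]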
def loading_unloading_points(request):
data = json.loads(request.body)
transaction = Transaction.objects.get(transaction_id=data['transaction_id'])
locations = transaction.loading_unloading_location.all()
loading_list = []
unloading_list = []
for value in locations:
temp1 = []
temp2 = []
if value.type == 'loading':
temp1.append(value.address)
temp1.append(value.city.name)
loading_list.append(temp1)
elif value.type == 'unloading':
temp2.append(value.address)
temp2.append(value.city.name)
unloading_list.append(temp2)
df_loading = pd.DataFrame(loading_list, columns=['address', 'city'])
loading_details = df_loading.reset_index().to_json(orient='records')
loading_json = | json.loads(loading_details) | pandas.json.loads |
"""
Handles abstract LineList data
Implementation of a specific source (e.g. Vald) should be in its own file
Uses a pandas dataframe under the hood to handle the data
"""
import io
import json
import logging
import numpy as np
import pandas as pd
from flex.extensions.tabledata import JSONTableExtension
from scipy import constants
from ..persistence import IPersist
from ..util import air2vac, vac2air
logger = logging.getLogger(__name__)
class LineListError(Exception):
"""Raise when attempt to read a line data file fails"""
class LineList(IPersist):
"""Atomic data for a list of spectral lines"""
_base_columns = [
"species",
"wlcent",
"excit",
"gflog",
"gamrad",
"gamqst",
"gamvw",
"atom_number",
"ionization",
]
string_columns = ["species", "term_lower", "term_upper", "reference"]
# Citations are added in the submodule (e.g. ValdFile)
citation_info = ""
@staticmethod
def parse_line_error(error_flags, values=None):
"""Transform Line Error flags into relative error values
Parameters
----------
error_flags : list(str)
Error Flags as defined by various references
values : float
depths of the lines, to convert absolute to relative errors
Returns
-------
errors : list(float)
relative errors for each line
"""
if values is None:
values = np.ones(len(error_flags))
nist = {
"AAA": 0.003,
"AA": 0.01,
"A+": 0.02,
"A": 0.03,
"B+": 0.07,
"B": 0.1,
"C+": 0.18,
"C": 0.25,
"C-": 0.3,
"D+": 0.4,
"D": 0.5,
"D-": 0.6,
"E": 0.7,
}
error = np.ones(len(error_flags), dtype=float)
for i, (flag, _) in enumerate(zip(error_flags, values)):
if len(flag) == 0:
error[i] = 0.5
elif flag[0] in [" ", "_", "P"]:
# undefined or predicted
error[i] = 0.5
elif flag[0] == "E":
# absolute error in dex
# TODO absolute?
error[i] = 10 ** float(flag[1:])
elif flag[0] == "C":
# Cancellation Factor, i.e. relative error
try:
error[i] = abs(float(flag[1:]))
except ValueError:
error[i] = 0.5
elif flag[0] == "N":
# NIST quality class
flag = flag[1:5].strip()
try:
error[i] = nist[flag]
except KeyError:
error[i] = 0.5
return error
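# Hedged worked example (added; not in the original module) of the mapping
# implemented above:
#
#     LineList.parse_line_error(["NAAA", "E0.05", "C0.2", " "])
#     # -> array([0.003, 1.122..., 0.2, 0.5])
#
# i.e. NIST class AAA, an absolute error of 0.05 dex, a cancellation factor
# of 0.2, and the 0.5 default for an undefined flag.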
@staticmethod
def guess_format(kwargs):
short_line_format = kwargs.pop(
"short_line_format", kwargs.pop("short_format", None)
)
if short_line_format is not None:
return short_line_format
keys = kwargs.keys()
if (
"line_extra" in keys
and "line_lulande" in keys
and "line_term_low" in keys
and "line_term_upp" in keys
):
return 2
return 1
@classmethod
def from_IDL_SME(cls, **kwargs):
"""extract LineList from IDL SME structure keywords"""
species = kwargs.pop("species").astype("U")
atomic = np.asarray(kwargs.pop("atomic"), dtype="<f8")
lande = np.asarray(kwargs.pop("lande"), dtype="<f8")
depth = np.asarray(kwargs.pop("depth"), dtype="<f8")
lineref = kwargs.pop("lineref").astype("U")
short_line_format = cls.guess_format(kwargs)
if short_line_format == 2:
line_extra = np.asarray(kwargs.pop("line_extra"), dtype="<f8")
line_lulande = np.asarray(kwargs.pop("line_lulande"), dtype="<f8")
line_term_low = kwargs.pop("line_term_low").astype("U")
line_term_upp = kwargs.pop("line_term_upp").astype("U")
# If there is only one line, it is 1D in the IDL structure, but we expect 2D
atomic = np.atleast_2d(atomic)
data = {
"species": species,
"atom_number": atomic[:, 0],
"ionization": atomic[:, 1],
"wlcent": atomic[:, 2],
"excit": atomic[:, 3],
"gflog": atomic[:, 4],
"gamrad": atomic[:, 5],
"gamqst": atomic[:, 6],
"gamvw": atomic[:, 7],
"lande": lande,
"depth": depth,
"reference": lineref,
}
if short_line_format == 1:
lineformat = "short"
if short_line_format == 2:
lineformat = "long"
error = [s[0:11].strip() for s in lineref]
error = LineList.parse_line_error(error, depth)
data["error"] = error
data["lande_lower"] = line_lulande[:, 0]
data["lande_upper"] = line_lulande[:, 1]
data["j_lo"] = line_extra[:, 0]
data["e_upp"] = line_extra[:, 1]
data["j_up"] = line_extra[:, 2]
data["term_lower"] = [t[10:].strip() for t in line_term_low]
data["term_upper"] = [t[10:].strip() for t in line_term_upp]
linedata = pd.DataFrame.from_dict(data)
return (linedata, lineformat)
def __init__(self, linedata=None, lineformat="short", medium=None, **kwargs):
if linedata is None or len(linedata) == 0:
if isinstance(linedata, pd.DataFrame):
linedata = pd.DataFrame(data=linedata, columns=self._base_columns)
elif "atomic" in kwargs.keys():
# everything is in the kwargs (usually by loading from old SME file)
linedata, lineformat = LineList.from_IDL_SME(**kwargs)
else:
linedata = pd.DataFrame(data=[], columns=self._base_columns)
else:
if isinstance(linedata, LineList):
linedata = linedata._lines
lineformat = linedata.lineformat
medium = linedata._medium
else:
if isinstance(linedata, (list, np.ndarray)):
# linedata = np.atleast_2d(linedata)
linedata = pd.DataFrame(
data=[[*linedata, 0, 0]], columns=self._base_columns
)
if "atom_number" in kwargs.keys():
linedata["atom_number"] = kwargs["atom_number"]
elif "atom_number" not in linedata:
linedata["atom_number"] = np.ones(len(linedata), dtype=float)
if "ionization" in kwargs.keys():
linedata["ionization"] = kwargs["ionization"]
elif "ionization" not in linedata and "species" in linedata:
linedata["ionization"] = np.array(
[int(s[-1]) for s in linedata["species"]], dtype=float
)
if "term_upper" in kwargs.keys():
linedata["term_upper"] = kwargs["term_upper"]
if "term_lower" in kwargs.keys():
linedata["term_lower"] = kwargs["term_lower"]
if "reference" in kwargs.keys():
linedata["reference"] = kwargs["reference"]
if "error" in kwargs.keys():
linedata["error"] = kwargs["error"]
#:{"short", "long"}: Defines how much information is available
self.lineformat = lineformat
#:pandas.DataFrame: DataFrame that contains all the data
self._lines = linedata # should have all the fields (20)
if medium in ["air", "vac", None]:
self._medium = medium
else:
raise ValueError(
f"Medium not recognized, expected one of ['air', 'vac'] , but got {medium} instead."
)
self.citation_info = ""
if "citation_info" in kwargs.keys():
self.citation_info = kwargs["citation_info"]
def __len__(self):
return len(self._lines)
def __str__(self):
return str(self._lines)
def __repr__(self):
return self.__str__()
def __iter__(self):
return self._lines.itertuples(index=False)
def __getitem__(self, index):
if isinstance(index, str) and hasattr(self, index):
return getattr(self, index)
if isinstance(index, (list, str)):
if len(index) == 0:
return LineList(
self._lines.iloc[[]],
lineformat=self.lineformat,
medium=self.medium,
)
values = self._lines[index].values
if index in self.string_columns:
values = values.astype(str)
return values
else:
if isinstance(index, int):
index = slice(index, index + 1)
# just pass on a subsection of the linelist data, but keep it a linelist object
return LineList(
self._lines.iloc[index], self.lineformat, medium=self.medium
)
def __getattribute__(self, name):
if name[0] != "_" and name not in dir(self):
return self._lines[name].values
return super().__getattribute__(name)
@property
def columns(self):
return self._lines.columns
@property
def medium(self):
return self._medium
@medium.setter
def medium(self, value):
if self._medium is None:
logger.warning(
"No medium was defined for the linelist."
" The new value of %s is assumed to be the native medium of the linelist. No conversion was performed",
value,
)
self._medium = value
if self._medium == value:
return
else:
if self._medium == "air" and value == "vac":
self._lines["wlcent"] = air2vac(self._lines["wlcent"])
self._medium = "vac"
elif self._medium == "vac" and value == "air":
self._lines["wlcent"] = vac2air(self._lines["wlcent"])
self._medium = "air"
else:
raise ValueError(
f"Type of medium not undertstood. Expected one of [vac, air], but got {value} instead"
)
@property
def species(self):
"""list(str) of size (nlines,): Species name of each line"""
return self._lines["species"].values.astype("U")
@property
def lulande(self):
"""list(float) of size (nlines, 2): Lower and Upper Lande factors"""
if self.lineformat == "short":
raise AttributeError(
"Lower and Upper Lande Factors are only available in the long line format"
)
# additional data arrays for sme
names = ["lande_lower", "lande_upper"]
return self._lines.reindex(columns=names).values
@property
def extra(self):
"""list(float) of size (nlines, 3): additional line level information for NLTE calculation"""
if self.lineformat == "short":
raise AttributeError("Extra is only available in the long line format")
names = ["j_lo", "e_upp", "j_up"]
return self._lines.reindex(columns=names).values
@property
def atomic(self):
"""list(float) of size (nlines, 8): Data array passed to C library, should only be used for this purpose"""
names = [
"atom_number",
"ionization",
"wlcent",
"excit",
"gflog",
"gamrad",
"gamqst",
"gamvw",
]
# Select fields
values = self._lines.reindex(columns=names).values
values = values.astype(float)
return values
def sort(self, field="wlcent", ascending=True):
"""Sort the linelist
The underlying datastructure will be replaced,
i.e. any references will not be sorted or updated
Parameters
----------
field : str, optional
Field to use for sorting (default: "wlcent")
ascending : bool, optional
Whether to sort in ascending or descending order (default: True)
Returns
-------
self : LineList
The sorted LineList object
"""
self._lines = self._lines.sort_values(by=field, ascending=ascending)
return self
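# Hedged usage note (added): sort() swaps out the underlying DataFrame, so a
# typical call is e.g. `linelist.sort("wlcent")`, or `linelist.sort("gflog",
# ascending=False)` for the strongest lines first; references obtained earlier
# (such as `linelist.atomic`) are not re-sorted, as the docstring warns.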
def add(self, species, wlcent, excit, gflog, gamrad, gamqst, gamvw):
"""Add a new line to the existing linelist
This replaces the underlying datastructure,
i.e. any references (atomic, etc.) will not be updated
Parameters
----------
species : str
Name of the element and ionization
wlcent : float
central wavelength
excit : float
excitation energy in eV
gflog : float
gf logarithm
gamrad : float
broadening factor radiation
gamqst : float
broadening factor qst
gamvw : float
broadening factor van der Waals
"""
linedata = {
"species": species,
"wlcent": wlcent,
"excit": excit,
"gflog": gflog,
"gamrad": gamrad,
"gamqst": gamqst,
"gamvw": gamvw,
}
self._lines = self._lines.append([linedata])
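# Hedged usage sketch (added): appending a fictitious Fe 1 line just to show
# the call signature of add(); all numbers below are placeholders.
#
#     linelist.add("Fe 1", 5434.5, 1.01, -2.12, 7.8, -6.2, -7.6)
#
# Note that add() relies on DataFrame.append, which recent pandas releases
# have removed, so pd.concat may be required there instead.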
def trim(self, wave_min, wave_max, rvel=None):
if rvel is not None:
# Speed of light in km/s
c_light = constants.c * 1e3
wave_min *= np.sqrt((1 - rvel / c_light) / (1 + rvel / c_light))
wave_max *= np.sqrt((1 + rvel / c_light) / (1 - rvel / c_light))
selection = self._lines["wlcent"] > wave_min
selection &= self._lines["wlcent"] < wave_max
return LineList(self._lines[selection])
def _save(self):
header = {
"lineformat": self.lineformat,
"medium": self.medium,
"citation_info": self.citation_info,
}
data = self._lines
ext = JSONTableExtension(header, data)
return ext
@classmethod
def _load(cls, ext: JSONTableExtension):
ll = cls(ext.data, **ext.header)
return ll
def _save_v1(self, file, folder="linelist"):
if folder != "" and folder[-1] != "/":
folder = folder + "/"
info = {
"format": self.lineformat,
"medium": self.medium,
"citation_info": self.citation_info,
}
file.writestr(f"{folder}info.json", json.dumps(info))
lines = self._lines.reset_index(drop=True)
# Eventually feather should be stable, at that point we can use this
# Until then use JSON?
# b = io.BytesIO()
# lines.to_feather(b)
# file.writestr(f"{folder}data.feather", b.getvalue())
linedata = lines.to_json(orient="records")
file.writestr(f"{folder}data.json", linedata)
@staticmethod
def _load_v1(file, names, folder=""):
for name in names:
if name.endswith("info.json"):
info = file.read(name)
info = json.loads(info)
lineformat = info["format"]
medium = info.get("medium")
citation_info = info.get("citation_info", "")
elif name.endswith("data.feather"):
b = io.BytesIO(file.read(name))
linedata = pd.read_feather(b)
elif name.endswith("data.json"):
b = io.BytesIO(file.read(name))
linedata = pd.read_json(b, orient="records")
elif name.endswith("data.npy"):
b = io.BytesIO(file.read(name))
linedata = np.load(b)
linedata = | pd.DataFrame.from_records(linedata) | pandas.DataFrame.from_records |
import numpy as np
from numpy.testing import assert_equal, assert_raises
from pandas import Series
import pytest
from statsmodels.graphics.factorplots import _recode, interaction_plot
try:
import matplotlib.pyplot as plt
except ImportError:
pass
class TestInteractionPlot:
@classmethod
def setup_class(cls):
np.random.seed(12345)
cls.weight = np.random.randint(1,4,size=60)
cls.duration = np.random.randint(1,3,size=60)
cls.days = np.log(np.random.randint(1,30, size=60))
@pytest.mark.matplotlib
def test_plot_both(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days,
colors=['red','blue'], markers=['D','^'], ms=10)
@pytest.mark.matplotlib
def test_plot_rainbow(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days,
markers=['D','^'], ms=10)
@pytest.mark.matplotlib
@pytest.mark.parametrize('astype', ['str', 'int'])
def test_plot_pandas(self, astype, close_figures):
weight = Series(self.weight, name='Weight').astype(astype)
duration = Series(self.duration, name='Duration')
days = | Series(self.days, name='Days') | pandas.Series |
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not | com._all_not_none(1, 2, 3, None) | pandas.core.common._all_not_none |
__author__ = 'Yan'
import numpy
import pandas
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn
import statistics
# bug fix for display formats to avoid run time errors
pandas.set_option('display.float_format', lambda x:'%.2f'%x)
#load the data
data = pandas.read_csv('separatedData.csv')
# convert to numeric format
data["breastCancer100th"] = pandas.to_numeric(data["breastCancer100th"], errors='coerce')
data["meanSugarPerson"] = | pandas.to_numeric(data["meanSugarPerson"], errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python
# encoding: utf-8
'''
Created by <NAME>
on 2018-09-08.
Multivariate cox analysis to understand breast cancer CNA usefulness from CBioportal data.
Copyright (c) 2018. All rights reserved.
'''
import pandas as pd
import numpy as np
import argparse
import sys
import os
import pdb
import rpy2
import pprint
import cbioportal_util
sys.path.append('../common/')
import utilities as util
import analysis
import tumor_stage_util
cancer_type = 'BRCA'
age_r = 'AGE_AT_DIAGNOSIS'
er_r = 'ER_STATUS'
pr_r = 'PR_STATUS'
her2_r = 'HER2_STATUS'
stage_r = 'TUMOR_STAGE'
def get_options():
parser = argparse.ArgumentParser(description='Tumor stage group counts')
parser.add_argument('-c', action='store', dest='BRCA_clinical')
parser.add_argument('-i', action='store', dest='clinical_variables')
parser.add_argument('-d', action='store', dest='BRCA_cna')
parser.add_argument('-o', action='store', dest='outdir', default='.')
ns = parser.parse_args()
return (ns.BRCA_clinical, ns.clinical_variables, ns.BRCA_cna,
ns.outdir)
def make_clinical_data(clinical_file, clinical_variables, outdir):
clinical = pd.read_csv(clinical_file, index_col=0)
clinical = clinical[[age_r, er_r, pr_r, her2_r, stage_r, 'Time', 'Censor']]
stage_groups = pd.read_csv(os.path.join(clinical_variables, 'BRCA_stage.csv'), dtype=str)
er_groups = pd.read_csv(os.path.join(clinical_variables, 'BRCA_er.csv'), dtype=str)
pr_groups = pd.read_csv(os.path.join(clinical_variables, 'BRCA_pr.csv'), dtype=str)
her2_groups = pd.read_csv(os.path.join(clinical_variables, 'BRCA_her2.csv'), dtype=str)
clinical = tumor_stage_util.group_discontinuous_vars(stage_r, 'stage', stage_groups, clinical)
clinical = tumor_stage_util.group_discontinuous_vars(er_r, 'er', er_groups, clinical)
clinical = tumor_stage_util.group_discontinuous_vars(pr_r, 'pr', pr_groups, clinical)
clinical = tumor_stage_util.group_discontinuous_vars(her2_r, 'her2', her2_groups, clinical)
clinical['combined_er_pr'] = np.where(clinical['er_0'] & clinical['pr_0'], 1, 0)
clinical[age_r] = pd.to_numeric(clinical[age_r], errors='coerce')
clinical.to_csv(os.path.join(outdir, cancer_type + '_clinical.csv'),
index_label='patient_id')
return clinical
def do_cox_models(clinical, cn_file, outdir):
cn = pd.read_csv(cn_file, sep='\t', index_col=0)
cn_by_patient = cn.transpose()
cn_by_patient = cn_by_patient.drop(['Entrez_Gene_Id'])
cn = cn_by_patient[['MYC']]
data = cn.join(clinical, how='inner')
analyses = {
'CNA only': [age_r, 'her2_0', 'combined_er_pr', 'stage_0', 'stage_1'],
}
results = | pd.DataFrame() | pandas.DataFrame |
import itertools
import math
import warnings
from copy import deepcopy
from enum import Enum
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
import holidays as holidays_lib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly
import plotly.graph_objects as go
import seaborn as sns
from matplotlib.lines import Line2D
from scipy.signal import periodogram
from typing_extensions import Literal
from etna.analysis import RelevanceTable
from etna.analysis.feature_selection import AGGREGATION_FN
from etna.analysis.feature_selection import AggregationMode
from etna.analysis.utils import prepare_axes
from etna.transforms import Transform
if TYPE_CHECKING:
from etna.datasets import TSDataset
from etna.transforms import TimeSeriesImputerTransform
from etna.transforms.decomposition.change_points_trend import ChangePointsTrendTransform
from etna.transforms.decomposition.detrend import LinearTrendTransform
from etna.transforms.decomposition.detrend import TheilSenTrendTransform
from etna.transforms.decomposition.stl import STLTransform
def _get_existing_quantiles(ts: "TSDataset") -> Set[float]:
"""Get quantiles that are present inside the TSDataset."""
cols = [col for col in ts.columns.get_level_values("feature").unique().tolist() if col.startswith("target_0.")]
existing_quantiles = {float(col[len("target_") :]) for col in cols}
return existing_quantiles
def _select_quantiles(forecast_results: Dict[str, "TSDataset"], quantiles: Optional[List[float]]) -> List[float]:
"""Select quantiles from the forecast results.
Selected quantiles exist in each forecast.
"""
intersection_quantiles_set = set.intersection(
*[_get_existing_quantiles(forecast) for forecast in forecast_results.values()]
)
intersection_quantiles = sorted(list(intersection_quantiles_set))
if quantiles is None:
selected_quantiles = intersection_quantiles
else:
selected_quantiles = sorted(list(set(quantiles) & intersection_quantiles_set))
non_existent = set(quantiles) - intersection_quantiles_set
if non_existent:
warnings.warn(f"Quantiles {non_existent} do not exist in each forecast dataset. They will be dropped.")
return selected_quantiles
def _prepare_forecast_results(
forecast_ts: Union["TSDataset", List["TSDataset"], Dict[str, "TSDataset"]]
) -> Dict[str, "TSDataset"]:
"""Prepare dictionary with forecasts results."""
from etna.datasets import TSDataset
if isinstance(forecast_ts, TSDataset):
return {"1": forecast_ts}
elif isinstance(forecast_ts, list) and len(forecast_ts) > 0:
return {str(i + 1): forecast for i, forecast in enumerate(forecast_ts)}
elif isinstance(forecast_ts, dict) and len(forecast_ts) > 0:
return forecast_ts
else:
raise ValueError("Unknown type of `forecast_ts`")
def plot_forecast(
forecast_ts: Union["TSDataset", List["TSDataset"], Dict[str, "TSDataset"]],
test_ts: Optional["TSDataset"] = None,
train_ts: Optional["TSDataset"] = None,
segments: Optional[List[str]] = None,
n_train_samples: Optional[int] = None,
columns_num: int = 2,
figsize: Tuple[int, int] = (10, 5),
prediction_intervals: bool = False,
quantiles: Optional[List[float]] = None,
):
"""
Plot of prediction for forecast pipeline.
Parameters
----------
forecast_ts:
there are several options:
#. Forecasted TSDataset with timeseries data, single-forecast mode
#. List of forecasted TSDatasets, multi-forecast mode
#. Dictionary with forecasted TSDatasets, multi-forecast mode
test_ts:
TSDataset with timeseries data
train_ts:
TSDataset with timeseries data
segments:
segments to plot; if not given plot all the segments from ``forecast_ts``
n_train_samples:
length of history of train to plot
columns_num:
number of graphics columns
figsize:
size of the figure per subplot with one segment in inches
prediction_intervals:
if True prediction intervals will be drawn
quantiles:
List of quantiles to draw, if isn't set then quantiles from a given dataset will be used.
In multi-forecast mode, only quantiles present in each forecast will be used.
Raises
------
ValueError:
if the format of ``forecast_ts`` is unknown
"""
forecast_results = _prepare_forecast_results(forecast_ts)
num_forecasts = len(forecast_results.keys())
if segments is None:
unique_segments = set()
for forecast in forecast_results.values():
unique_segments.update(forecast.segments)
segments = list(unique_segments)
ax = prepare_axes(segments=segments, columns_num=columns_num, figsize=figsize)
if prediction_intervals:
quantiles = _select_quantiles(forecast_results, quantiles)
if train_ts is not None:
train_ts.df.sort_values(by="timestamp", inplace=True)
if test_ts is not None:
test_ts.df.sort_values(by="timestamp", inplace=True)
for i, segment in enumerate(segments):
if train_ts is not None:
segment_train_df = train_ts[:, segment, :][segment]
else:
segment_train_df = pd.DataFrame(columns=["timestamp", "target", "segment"])
if test_ts is not None:
segment_test_df = test_ts[:, segment, :][segment]
else:
segment_test_df = pd.DataFrame(columns=["timestamp", "target", "segment"])
if n_train_samples is None:
plot_df = segment_train_df
elif n_train_samples != 0:
plot_df = segment_train_df[-n_train_samples:]
else:
plot_df = | pd.DataFrame(columns=["timestamp", "target", "segment"]) | pandas.DataFrame |
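# Hedged usage sketch for plot_forecast above (added; not in the original
# source). The names `pipeline`, `train_ts` and `test_ts` are illustrative
# stand-ins for an already fitted etna pipeline and its train/test datasets.
#
#     forecast_ts = pipeline.forecast(prediction_interval=True)
#     plot_forecast(forecast_ts, test_ts=test_ts, train_ts=train_ts,
#                   n_train_samples=50, prediction_intervals=True,
#                   quantiles=[0.025, 0.975])
#
# Passing a dict such as {"model_a": forecast_a, "model_b": forecast_b}
# switches the same call into multi-forecast mode.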
import threading
import time
import datetime
import pandas as pd
from functools import reduce, wraps
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import zscore
import model.queries as qrs
from model.NodesMetaData import NodesMetaData
import utils.helpers as hp
from utils.helpers import timer
import parquet_creation as pcr
import glob
import os
import dask
import dask.dataframe as dd
class Singleton(type):
def __init__(cls, name, bases, attibutes):
cls._dict = {}
cls._registered = []
def __call__(cls, dateFrom=None, dateTo=None, *args):
print('* OBJECT DICT ', len(cls._dict), cls._dict)
if (dateFrom is None) or (dateTo is None):
defaultDT = hp.defaultTimeRange()
dateFrom = defaultDT[0]
dateTo = defaultDT[1]
if (dateFrom, dateTo) in cls._dict:
print('** OBJECT EXISTS', cls, dateFrom, dateTo)
instance = cls._dict[(dateFrom, dateTo)]
else:
print('** OBJECT DOES NOT EXIST', cls, dateFrom, dateTo)
if (len(cls._dict) > 0) and ([dateFrom, dateTo] != cls._registered):
print('*** provide the latest and start thread', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
refresh = threading.Thread(target=cls.nextPeriodData, args=(dateFrom, dateTo, *args))
refresh.start()
elif ([dateFrom, dateTo] == cls._registered):
print('*** provide the latest', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
elif (len(cls._dict) == 0):
print('*** no data yet, refresh and wait', cls, dateFrom, dateTo)
cls.nextPeriodData(dateFrom, dateTo, *args)
instance = cls._dict[(dateFrom, dateTo)]
# keep only a few objects in memory
if len(cls._dict) >= 2:
cls._dict.pop(list(cls._dict.keys())[0])
return instance
def nextPeriodData(cls, dateFrom, dateTo, *args):
print(f'**** thread started for {cls}')
cls._registered = [dateFrom, dateTo]
instance = super().__call__(dateFrom, dateTo, *args)
cls._dict[(dateFrom, dateTo)] = instance
print(f'**** thread finished for {cls}')
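# Hedged usage sketch (dates are illustrative): classes built on this metaclass are
# cached per (dateFrom, dateTo) pair; a repeated call with the same range returns the
# cached instance, while a new range serves the latest object and refreshes in a thread.
# gdl_1 = GeneralDataLoader('2021-03-01 00:00', '2021-03-02 00:00')
# gdl_2 = GeneralDataLoader('2021-03-01 00:00', '2021-03-02 00:00')
# assert gdl_1 is gdl_2  # second call is served from cls._dict without re-querying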
class Updater(object):
def __init__(self):
self.StartThread()
@timer
def UpdateAllData(self):
print()
print(f'{datetime.now()} New data is on its way at {datetime.utcnow()}')
print('Active threads:',threading.active_count())
# query period must be the same for all data loaders
defaultDT = hp.defaultTimeRange()
GeneralDataLoader(defaultDT[0], defaultDT[1])
SiteDataLoader(defaultDT[0], defaultDT[1])
PrtoblematicPairsDataLoader(defaultDT[0], defaultDT[1])
SitesRanksDataLoader(defaultDT[0], defaultDT[1])
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.UpdateAllData) # 1hour
thread.daemon = True
thread.start()
class ParquetUpdater(object):
def __init__(self):
self.StartThread()
@timer
def Update(self):
print('Starting Parquet Updater')
limit = pcr.limit
indices = pcr.indices
files = glob.glob('..\parquet\*')
print('files',files)
file_end = str(int(limit*24))
print('end of file trigger',file_end)
for f in files:
if f.endswith(file_end):
os.remove(f)
files = glob.glob('..\parquet\*')
print('files2',files)
for idx in indices:
j=int((limit*24)-1)
print('idx',idx,'j',j)
for f in files[::-1]:
file_end = str(idx)
end = file_end+str(j)
print('f',f,'end',end)
if f.endswith(end):
new_name = file_end+str(j+1)
head = '..\parquet\\'
final = head+new_name
print('f',f,'final',final)
os.rename(f,final)
j -= 1
jobs = []
limit = 1/24
timerange = pcr.queryrange(limit)
for idx in indices:
thread = threading.Thread(target=pcr.btwfunc,args=(idx,timerange))
jobs.append(thread)
for j in jobs:
j.start()
for j in jobs:
j.join()
# print('Finished Querying')
for idx in indices:
filenames = pcr.ReadParquet(idx,limit)
if idx == 'ps_packetloss':
print(filenames)
plsdf = dd.read_parquet(filenames).compute()
print('Before drops',len(plsdf))
plsdf = plsdf.drop_duplicates()
print('After Drops',len(plsdf))
print('packetloss\n',plsdf)
if idx == 'ps_owd':
owddf = dd.read_parquet(filenames).compute()
print('owd\n',owddf)
if idx == 'ps_retransmits':
rtmdf = dd.read_parquet(filenames).compute()
print('retransmits\n',rtmdf)
if idx == 'ps_throughput':
trpdf = dd.read_parquet(filenames).compute()
print('throughput\n',trpdf)
print('dask df complete')
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.Update) # 1hour
thread.daemon = True
thread.start()
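# Hedged note on Update() above: it keeps a rolling window of limit*24 hourly parquet
# slots per index - the oldest slot is deleted, the remaining file suffixes are shifted
# up by one, the most recent hour is queried in parallel threads via parquet_creation,
# and each index is then reloaded with dask.dataframe.read_parquet().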
class GeneralDataLoader(object, metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.lastUpdated = None
self.pls = pd.DataFrame()
self.owd = pd.DataFrame()
self.thp = pd.DataFrame()
self.rtm = pd.DataFrame()
self.UpdateGeneralInfo()
@property
def dateFrom(self):
return self._dateFrom
@dateFrom.setter
def dateFrom(self, value):
self._dateFrom = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def dateTo(self):
return self._dateTo
@dateTo.setter
def dateTo(self, value):
self._dateTo = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def lastUpdated(self):
return self._lastUpdated
@lastUpdated.setter
def lastUpdated(self, value):
self._lastUpdated = value
@timer
def UpdateGeneralInfo(self):
# print("last updated: {0}, new start: {1} new end: {2} ".format(self.lastUpdated, self.dateFrom, self.dateTo))
self.pls = NodesMetaData('ps_packetloss', self.dateFrom, self.dateTo).df
self.owd = NodesMetaData('ps_owd', self.dateFrom, self.dateTo).df
self.thp = NodesMetaData('ps_throughput', self.dateFrom, self.dateTo).df
self.rtm = NodesMetaData('ps_retransmits', self.dateFrom, self.dateTo).df
self.latency_df = pd.merge(self.pls, self.owd, how='outer')
self.throughput_df = pd.merge(self.thp, self.rtm, how='outer')
all_df = pd.merge(self.latency_df, self.throughput_df, how='outer')
self.all_df = all_df.drop_duplicates()
self.pls_related_only = self.pls[self.pls['host_in_ps_meta'] == True]
self.owd_related_only = self.owd[self.owd['host_in_ps_meta'] == True]
self.thp_related_only = self.thp[self.thp['host_in_ps_meta'] == True]
self.rtm_related_only = self.rtm[self.rtm['host_in_ps_meta'] == True]
self.latency_df_related_only = self.latency_df[self.latency_df['host_in_ps_meta'] == True]
self.throughput_df_related_only = self.throughput_df[self.throughput_df['host_in_ps_meta'] == True]
self.all_df_related_only = self.all_df[self.all_df['host_in_ps_meta'] == True]
self.all_tested_pairs = self.getAllTestedPairs()
self.lastUpdated = datetime.now()
def getAllTestedPairs(self):
all_df = self.all_df[['host', 'ip']]
df = pd.DataFrame(qrs.queryAllTestedPairs([self.dateFrom, self.dateTo]))
df = pd.merge(all_df, df, left_on='ip', right_on='src', how='right')
df = pd.merge(all_df, df, left_on='ip', right_on='dest', how='right', suffixes=('_dest', '_src'))
df.drop_duplicates(keep='first', inplace=True)
df = df.sort_values(['host_src', 'host_dest'])
df['host_dest'] = df['host_dest'].fillna('N/A')
df['host_src'] = df['host_src'].fillna('N/A')
df['source'] = df[['host_src', 'src']].apply(lambda x: ': '.join(x), axis=1)
df['destination'] = df[['host_dest', 'dest']].apply(lambda x: ': '.join(x), axis=1)
# df = df.sort_values(by=['host_src', 'host_dest'], ascending=False)
df = df[['host_dest', 'host_src', 'idx', 'src', 'dest', 'source', 'destination']]
return df
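# Hedged usage sketch (dates are illustrative): GeneralDataLoader exposes per-index
# metadata frames (pls/owd/thp/rtm), their "*_related_only" subsets (hosts present in
# ps_meta), and the list of tested source/destination pairs.
# gdl = GeneralDataLoader('2021-03-01 00:00', '2021-03-02 00:00')
# gdl.all_tested_pairs[['source', 'destination', 'idx']].head()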
class SiteDataLoader(object, metaclass=Singleton):
genData = GeneralDataLoader()
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.UpdateSiteData()
def UpdateSiteData(self):
        # print('UpdateSiteData >>> ', self.dateFrom, self.dateTo)
pls_site_in_out = self.InOutDf("ps_packetloss", self.genData.pls_related_only)
self.pls_data = pls_site_in_out['data']
self.pls_dates = pls_site_in_out['dates']
owd_site_in_out = self.InOutDf("ps_owd", self.genData.owd_related_only)
self.owd_data = owd_site_in_out['data']
self.owd_dates = owd_site_in_out['dates']
thp_site_in_out = self.InOutDf("ps_throughput", self.genData.thp_related_only)
self.thp_data = thp_site_in_out['data']
self.thp_dates = thp_site_in_out['dates']
rtm_site_in_out = self.InOutDf("ps_retransmits", self.genData.rtm_related_only)
self.rtm_data = rtm_site_in_out['data']
self.rtm_dates = rtm_site_in_out['dates']
self.latency_df_related_only = self.genData.latency_df_related_only
self.throughput_df_related_only = self.genData.throughput_df_related_only
self.sites = self.orderSites()
@timer
def InOutDf(self, idx, idx_df):
print(idx)
in_out_values = []
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for t in ['dest_host', 'src_host']:
meta_df = idx_df.copy()
df = pd.DataFrame(qrs.queryDailyAvg(idx, t, time_list[0], time_list[1])).reset_index()
df['index'] = pd.to_datetime(df['index'], unit='ms').dt.strftime('%d/%m')
df = df.transpose()
header = df.iloc[0]
df = df[1:]
df.columns = ['day-3', 'day-2', 'day-1', 'day']
meta_df = pd.merge(meta_df, df, left_on="host", right_index=True)
three_days_ago = meta_df.groupby('site').agg({'day-3': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
two_days_ago = meta_df.groupby('site').agg({'day-2': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
one_day_ago = meta_df.groupby('site').agg({'day-1': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
today = meta_df.groupby('site').agg({'day': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
site_avg_df = reduce(lambda x,y: pd.merge(x,y, on='site', how='outer'), [three_days_ago, two_days_ago, one_day_ago, today])
site_avg_df.set_index('site', inplace=True)
change = site_avg_df.pct_change(axis='columns')
site_avg_df = pd.merge(site_avg_df, change, left_index=True, right_index=True, suffixes=('_val', ''))
site_avg_df['direction'] = 'IN' if t == 'dest_host' else 'OUT'
in_out_values.append(site_avg_df)
site_df = pd.concat(in_out_values).reset_index()
site_df = site_df.round(2)
return {"data": site_df,
"dates": header}
def orderSites(self):
problematic = []
problematic.extend(self.thp_data.nsmallest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.rtm_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.pls_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.owd_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic = list(set(problematic))
all_df = self.genData.all_df_related_only.copy()
all_df['has_problems'] = all_df['site'].apply(lambda x: True if x in problematic else False)
sites = all_df.sort_values(by='has_problems', ascending=False).drop_duplicates(['site'])['site'].values
return sites
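# Hedged note on orderSites() above: a site is flagged as problematic when it appears in
# any of the four worst-20 lists (lowest throughput, highest retransmits, packet loss or
# OWD); the returned array lists flagged sites first, then the rest, without duplicates.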
class PrtoblematicPairsDataLoader(object, metaclass=Singleton):
gobj = GeneralDataLoader()
LIST_IDXS = ['ps_packetloss', 'ps_owd', 'ps_retransmits', 'ps_throughput']
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.all_df = self.gobj.all_df_related_only[['ip', 'is_ipv6', 'host', 'site', 'admin_email', 'admin_name', 'ip_in_ps_meta',
'host_in_ps_meta', 'host_index', 'site_index', 'host_meta', 'site_meta']].sort_values(by=['ip_in_ps_meta', 'host_in_ps_meta', 'ip'], ascending=False)
self.df = self.markNodes()
@timer
def buildProblems(self, idx):
print('buildProblems...',idx)
data = []
intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60)
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv)
for i in range(len(time_list)-1):
data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))
return data
@timer
def getPercentageMeasuresDone(self, grouped, tempdf):
measures_done = tempdf.groupby('hash').agg({'doc_count':'sum'})
def findRatio(row, total_minutes):
if pd.isna(row['doc_count']):
count = '0'
else: count = str(round((row['doc_count']/total_minutes)*100))+'%'
return count
one_test_per_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo)
measures_done['tests_done'] = measures_done.apply(lambda x: findRatio(x, one_test_per_min), axis=1)
grouped = pd.merge(grouped, measures_done, on='hash', how='left')
return grouped
# @timer
def markNodes(self):
df = pd.DataFrame()
for idx in hp.INDECES:
tempdf = pd.DataFrame(self.buildProblems(idx))
grouped = tempdf.groupby(['src', 'dest', 'hash']).agg({'value': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
grouped = self.getRelHosts(grouped)
            # zscore based on each pair's values
tempdf['zscore'] = tempdf.groupby('hash')['value'].apply(lambda x: (x - x.mean())/x.std())
# add max zscore so that it is possible to order by worst
max_z = tempdf.groupby('hash').agg({'zscore':'max'}).rename(columns={'zscore':'max_hash_zscore'})
grouped = pd.merge(grouped, max_z, on='hash', how='left')
# zscore based on the whole dataset
grouped['zscore'] = grouped[['value']].apply(lambda x: (x - x.mean())/x.std())
grouped['idx'] = idx
# calculate the percentage of measures based on the assumption that ideally measures are done once every minute
grouped = self.getPercentageMeasuresDone(grouped, tempdf)
# this is not accurate since we have some cases with 4-5 times more tests than expected
# avg_numtests = tempdf.groupby('hash').agg({'doc_count':'mean'}).values[0][0]
# Add flags for some general problems
if (idx == 'ps_packetloss'):
grouped['all_packets_lost'] = grouped['hash'].apply(lambda x: 1 if x in grouped[grouped['value']==1]['hash'].values else 0)
else: grouped['all_packets_lost'] = -1
def checkThreshold(value):
if (idx == 'ps_packetloss'):
if value > 0.05:
return 1
return 0
elif (idx == 'ps_owd'):
if value > 1000 or value < 0:
return 1
return 0
elif (idx == 'ps_throughput'):
if round(value/1e+6, 2) < 25:
return 1
return 0
elif (idx == 'ps_retransmits'):
if value > 100000:
return 1
return 0
grouped['threshold_reached'] = grouped['value'].apply(lambda row: checkThreshold(row))
grouped['has_bursts'] = grouped['hash'].apply(lambda x: 1
if x in tempdf[tempdf['zscore']>5]['hash'].values
else 0)
grouped['src_not_in'] = grouped['hash'].apply(lambda x: 1
if x in grouped[grouped['src'].isin(self.all_df['ip']) == False]['hash'].values
else 0)
grouped['dest_not_in'] = grouped['hash'].apply(lambda x: 1
if x in grouped[grouped['dest'].isin(self.all_df['ip']) == False]['hash'].values
else 0)
grouped['measures'] = grouped['doc_count'].astype(str)+'('+grouped['tests_done'].astype(str)+')'
df = df.append(grouped, ignore_index=True)
df.fillna('N/A', inplace=True)
print(f'Total number of hashes: {len(df)}')
return df
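    # Hedged summary of the flag columns built in markNodes(): 'all_packets_lost'
    # (mean packet loss equal to 1, ps_packetloss only), 'threshold_reached' (per-index
    # limits hard-coded in checkThreshold), 'has_bursts' (pair zscore above 5 in any
    # interval), and 'src_not_in'/'dest_not_in' (endpoint IP missing from the metadata).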
@timer
def getValues(self, probdf):
# probdf = markNodes()
df = pd.DataFrame(columns=['timestamp', 'value', 'idx', 'hash'])
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for item in probdf[['src', 'dest', 'idx']].values:
tempdf = pd.DataFrame(qrs.queryAllValues(item[2], item, time_list[0], time_list[1]))
tempdf['idx'] = item[2]
tempdf['hash'] = item[0]+"-"+item[1]
tempdf['src'] = item[0]
tempdf['dest'] = item[1]
tempdf.rename(columns={hp.getValueField(item[2]): 'value'}, inplace=True)
df = df.append(tempdf, ignore_index=True)
return df
@timer
def getRelHosts(self, probdf):
df1 = pd.merge(self.all_df[['host', 'ip', 'site']], probdf[['src', 'hash']], left_on='ip', right_on='src', how='right')
df2 = | pd.merge(self.all_df[['host', 'ip', 'site']], probdf[['dest', 'hash']], left_on='ip', right_on='dest', how='right') | pandas.merge |
# -*- coding: utf-8; py-indent-offset:4 -*-
import os, sys
import datetime as dt
import tabulate as tb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ..core import get_cn_fund_list, get_cn_fund_daily, get_cn_fund_manager, get_cn_fund_company, get_all_symbol_name, get_daily
from ..utils import dict_from_df, datetime_today, sort_with_options, filter_with_options, date_range_from_options, range_from_options, csv_xlsx_from_options, symbols_from_params
from ..core import LANG
def cli_fund_help():
syntax_tips = '''Syntax:
__argv0__ fund update <all | symbols | symbols.csv>
__argv0__ fund list <all | symbols | symbols.csv> [-include=... -exclude=... -same=...]
__argv0__ fund manager [<keyword>] [-s | -d] [-sortby=...] [-desc] [-filter_column=...-...]
__argv0__ fund company [<keyword>]
__argv0__ fund eval <all | symbols | symbols.csv> [-sortby=...] [-desc] [-filter_column=...-...]
__argv0__ fund plot <symbols | symbols.csv> [<options>]
__argv0__ fund backtest <all | symbols | symbols.csv> [-ref=...] [-days=...] [-date=yyyymmdd-yyyymmdd]
Options:
-sortby=<col> .................. sort by the column
-sharpe=2.5- ................... sharpe value between <from> to <to>
-drawdown_max=-20 .............. drawdown_max between <from> to <to>
-volatility=-20 ................ volatility between <from> to <to>
-out=file.csv .................. export fund list to .csv file
-out=file.xlsx ................. export fund data to .xlsx file
-s ............................. display symbol of the funds managed
-d ............................. display symbol and name of the funds managed
Example:
__argv0__ fund list
__argv0__ fund list -include=广发 -exclude=债 -out=output/myfunds.csv
__argv0__ fund pool 1.csv 2.csv -exclude=3.csv -same=4.csv -out=5.csv
__argv0__ fund update data/myfunds.csv
__argv0__ fund company 华安
__argv0__ fund manager -belongto=华夏基金
__argv0__ fund eval all -days=365 -sortby=sharpe -desc -limit=20 -out=output/top20_funds.xlsx
__argv0__ fund plot 002943 005669 000209 -days=365
__argv0__ fund plot data/funds.csv -days=365
__argv0__ fund plot data/funds.csv -years=3 -mix
__argv0__ fund backtest all -year=2018 -mix
__argv0__ fund backtest all -year=2010-2020 -mix
'''.replace('__argv0__',os.path.basename(sys.argv[0]))
print( syntax_tips )
def get_fund_symbol_name():
df = get_cn_fund_list(check_date= datetime_today())
return dict_from_df(df, 'symbol', 'name')
def get_fund_name_symbol():
df = get_cn_fund_list(check_date= datetime_today())
return dict_from_df(df, 'name', 'symbol')
def get_fund_company_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
return dict_from_df(df, 'fund', 'company')
def get_manager_size_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
df['manager'] = df['company'] + df['name']
return dict_from_df(df, 'manager', 'size')
def get_fund_manager_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
fund_manager = {}
for i, row in df.iterrows():
name = row['name']
fund = row['fund']
if fund in fund_manager:
if name not in fund_manager[ fund ]:
fund_manager[ fund ].append( name )
pass
else:
fund_manager[ fund ] = [ name ]
return fund_manager
def get_manager_fundname_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
manager_fund = {}
for i, row in df.iterrows():
name = row['company'] + ' ' + row['name']
fund = row['fund']
if name in manager_fund:
manager_fund[ name ].append( fund )
else:
manager_fund[ name ] = [ fund ]
return manager_fund
def get_manager_fundsymbol_mapping():
fund_symbol = dict_from_df(get_cn_fund_list(check_date= datetime_today()), 'name', 'symbol')
df = get_cn_fund_manager(check_date= datetime_today())
manager_fund = {}
for i, row in df.iterrows():
name = row['company'] + row['name']
fund = row['fund']
symbol = fund_symbol[fund] if (fund in fund_symbol) else ''
if symbol == '':
continue
if name in manager_fund:
manager_fund[ name ].append( symbol )
else:
manager_fund[ name ] = [ symbol ]
return manager_fund
def get_manager_fund_mapping():
fund_symbol = dict_from_df(get_cn_fund_list(check_date= datetime_today()), 'name', 'symbol')
df = get_cn_fund_manager(check_date= datetime_today())
manager_fund = {}
for i, row in df.iterrows():
name = row['company'] + ' ' + row['name']
fund = row['fund']
symbol = fund_symbol[fund] if (fund in fund_symbol) else ''
if symbol == '':
continue
if name in manager_fund:
manager_fund[ name ].append( symbol + ' - ' + fund )
else:
manager_fund[ name ] = [ symbol + ' - ' + fund ]
return manager_fund
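# Hedged example of the mapping shapes above (fund, manager and company names are
# purely illustrative):
# get_fund_manager_mapping()  -> {'华夏成长混合': ['张三', '李四'], ...}
# get_manager_fund_mapping()  -> {'华夏基金 张三': ['000001 - 华夏成长混合', ...], ...}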
# hiquant fund list
# hiquant fund list -include=多因子
def cli_fund_list(params, options):
df = get_cn_fund_list(check_date= datetime_today())
selected = total = df.shape[0]
if len(params) > 0:
symbols = symbols_from_params(params)
df = df[ df['symbol'].isin(symbols) ]
for option in options:
if option.startswith('-exclude='):
keywords = option.replace('-exclude=','').split(',')
for k in keywords:
df = df[ ~ df['name'].str.contains(k) ]
pass
elif option.startswith('-include='):
keywords = option.replace('-include=','').split(',')
filters = None
for k in keywords:
filter = df['name'].str.contains(k, na=False)
if filters is None:
filters = filter
else:
filters = filters | filter
df = df[ filters ]
pass
elif option.startswith('-belongto='):
keyword = option.replace('-belongto=','')
if option.endswith('.csv'):
df_filter = pd.read_csv(keyword, dtype=str)
companies = df_filter['company'].tolist()
else:
companies = [ keyword ]
df_fund_manager = get_cn_fund_manager(check_date= datetime_today())
df_fund_manager = df_fund_manager[ df_fund_manager['company'].isin(companies) ]
funds = list(set(df_fund_manager['fund'].tolist()))
df = df[ df['name'].isin(funds) ]
pass
elif option.startswith('-managedby='):
keyword = option.replace('-managedby=','')
if option.endswith('.csv'):
df_filter = pd.read_csv(keyword, dtype=str)
df_filter['manager'] = df_filter['company'] + df_filter['name']
managers = df_filter['manager'].tolist()
else:
managers = [ keyword ]
df_fund_manager = get_cn_fund_manager(check_date= datetime_today())
df_fund_manager['manager'] = df_fund_manager['company'] + df_fund_manager['name']
df_fund_manager = df_fund_manager[ df_fund_manager['manager'].isin(managers) ]
funds = list(set(df_fund_manager['fund'].tolist()))
df = df[ df['name'].isin(funds) ]
pass
pass
df = filter_with_options(df, options)
df = sort_with_options(df, options, by_default='symbol')
selected = df.shape[0]
print( tb.tabulate(df, headers='keys') )
print( selected, 'of', total, 'funds selected.')
out_csv_file, out_xls_file = csv_xlsx_from_options(options)
if out_csv_file:
df = df[['symbol', 'name']]
df.to_csv(out_csv_file, index= False)
print('Exported to:', out_csv_file)
print( tb.tabulate(df, headers='keys') )
if '-update' in options:
cli_fund_update(df['symbol'].tolist(), options)
if '-eval' in options:
cli_fund_eval(df['symbol'].tolist(), options)
return
if '-plot' in options:
cli_fund_plot(df['symbol'].tolist(), options + ['-man'])
# hiquant fund pool params -out=myfunds.csv
# hiquant fund pool params -same=other.csv -out=myfunds.csv
# hiquant fund pool params -exclude=other.csv -out=myfunds.csv
def cli_fund_pool(params, options):
df = get_cn_fund_list(check_date= datetime_today())
symbols = symbols_from_params(params)
df = df[ df['symbol'].isin(symbols) ].reset_index(drop= True)
for option in options:
if option.startswith('-same='):
other_arg = option.replace('-same=','')
other_symbols = symbols_from_params( [ other_arg ])
df = df[ df['symbol'].isin(other_symbols) ]
elif option.startswith('-exclude='):
other_arg = option.replace('-exclude=','')
other_symbols = symbols_from_params( [ other_arg ])
df = df[ ~ df['symbol'].isin(other_symbols) ]
print( tb.tabulate(df, headers='keys') )
df = filter_with_options(df, options)
df = sort_with_options(df, options, by_default='symbol')
range_from, range_to = range_from_options(options)
limit = range_to - range_from
if limit > 0:
df = df.head(limit)
out_csv_file, out_xlsx_file = csv_xlsx_from_options(options)
if out_csv_file:
df = df[['symbol', 'name']]
df.to_csv(out_csv_file, index= False)
print('Exported to:', out_csv_file)
if out_xlsx_file:
df = df[['symbol', 'name']]
df.to_excel(out_xlsx_file, index= False)
print('Exported to:', out_xlsx_file)
def cli_fund_company(params, options):
df = get_cn_fund_company()
limit = 0
yeartop = ''
manager_out_csv = ''
for k in options:
if k.startswith('-limit='):
limit = int(k.replace('-limit=',''))
if k.startswith('-yeartop='):
yeartop = k.replace('-yeartop=','')
if k.startswith('-manager_out=') and k.endswith('.csv'):
manager_out_csv = k.replace('-manager_out=','')
if yeartop:
df_top_managers = cli_fund_manager([], ['-yeartop='+yeartop])
df_yeartop = df_top_managers[['company']].groupby(['company']).size().reset_index(name='yeartopn')
company_yeartop = dict_from_df(df_yeartop, 'company', 'yeartopn')
df['yeartopn'] = [company_yeartop[c] if (c in company_yeartop) else 0 for c in df['company'].tolist()]
company_managers = {}
df_top_managers = df_top_managers.sort_values(by= 'best_return', ascending= False)
for i, row in df_top_managers.iterrows():
manager = row['name']
company = row['company']
if company in company_managers:
company_managers[company].append(manager)
else:
company_managers[company] = [ manager ]
df['names'] = ''
for i, row in df.iterrows():
company = row['company']
if company in company_managers:
names = ','.join( company_managers[company] )
df['names'].iloc[i] = names
if len(params) > 0:
if '.csv' in params[0]:
company_names = pd.read_csv(params[0], dtype=str)['company'].tolist()
df = df[ df['company'].isin(company_names) ]
else:
keyword = params[0]
df = df[ df['company'].str.contains(keyword, na=False) ]
selected = total = df.shape[0]
df = filter_with_options(df, options)
for k in options:
if k.startswith('-sortby='):
df = sort_with_options(df, options, by_default='managers')
if limit > 0:
df = df.head(limit)
selected = df.shape[0]
df = df.reset_index(drop= True)
print( tb.tabulate(df, headers='keys') )
print( selected, 'of', total, 'fund companies.')
out_csv_file, out_xls_file = csv_xlsx_from_options(options)
if manager_out_csv:
table = []
for i, row in df.iterrows():
company = row['company']
names = row['names'].split(',')
for name in names:
table.append([company, name])
df_manager = pd.DataFrame(table, columns=['company','name'])
df_manager.to_csv(manager_out_csv, index=False)
print('Managers exported to:', manager_out_csv)
if out_csv_file:
df_com = df[['company']]
df_com.to_csv(out_csv_file, index=False)
print( tb.tabulate(df_com, headers='keys') )
print('Exported to:', out_csv_file)
if out_xls_file:
df_com = df.rename(columns= {
'company_start': '成立日期',
'size': '管理规模\n(亿)',
'funds': '基金\n总数',
'managers': '基金经理\n人数',
'yeartopn': '业绩前列\n经理人数',
'names': '业绩优秀 基金经理 姓名',
})
del df_com['update_date']
df_com.to_excel(excel_writer= out_xls_file)
print( tb.tabulate(df_com, headers='keys') )
print('Exported to:', out_xls_file)
if '-plot' in options:
df_company_tmp = df.copy()
for i, row in df_company_tmp.iterrows():
company = row['company']
cli_fund_list([], options + ['-belongto=' + company, '-exclude=C,债,FOF,QDII,LOF', '-eval', '-one_per_manager', '-limit=10', '-png=output/' + company + '.png'])
def get_fund_area(name):
fund_areas = {
'QDII': ['QDII','美国','全球','现钞','现汇','人民币','纳斯达克','标普'],
'ETF': ['ETF','指数','联接'],
'债券': ['债'],
'量化': ['量化'],
'新能源': ['能源','双碳','低碳','碳中和','新经济','环保','环境','气候','智能汽车'],
'高端制造': ['制造','智造','战略','新兴产业'],
'信息技术': ['信息','互联网','芯片','半导体','集成电路','云计算'],
'医疗': ['医疗','养老','医药','健康'],
'军工': ['军工','国防','安全'],
'消费': ['消费','品质','白酒'],
'周期': ['周期','资源','钢铁','有色','金融','地产'],
'中小盘': ['中小盘','成长','创新'],
'价值': ['蓝筹','价值','龙头','优势','核心'],
'灵活配置': ['灵活配置','均衡'],
}
for k in fund_areas:
keywords = fund_areas[k]
for kk in keywords:
if kk in name:
return k
return ''
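# Hedged examples for get_fund_area() (fund names are illustrative); categories are
# checked in dict order, so the first matching keyword wins:
# get_fund_area('XX中证新能源汽车ETF联接') -> 'ETF'   (matches 'ETF' before '新能源')
# get_fund_area('XX医疗健康混合A')          -> '医疗'
# get_fund_area('XX稳健增利')               -> ''     (no keyword matches)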
def cli_fund_manager(params, options):
df = get_cn_fund_manager(check_date= datetime_today())
selected = total = df.shape[0]
if len(params) > 0:
keyword = params[0]
if ',' in keyword:
keyword = keyword.replace(',', '')
if keyword.endswith('.csv') or keyword.endswith('.xlsx'):
df_filter = pd.read_csv(keyword, dtype= str) if keyword.endswith('.csv') else pd.read_excel(keyword, dtype= str)
if 'name' in df_filter.columns:
df_filter['manager'] = df_filter['company'] + df_filter['name']
df1 = df.copy()
df1['manager'] = df['company'] + df['name']
df = df1[ df1['manager'].isin(df_filter['manager'].tolist()) ].drop(columns=['manager'])
else:
df = df[ df['company'].isin(df_filter['company'].tolist()) ]
else:
df1 = df.copy()
df1['keywords'] = df1['company'] + df1['name'] + ' ' + df1['fund']
df = df1[ df1['keywords'].str.contains(keyword, na=False) ].drop(columns=['keywords'])
yeartop = ''
limit = 0
belongto = ''
for k in options:
if k.startswith('-limit='):
limit = int(k.replace('-limit=',''))
if k.startswith('-yeartop='):
yeartop = k.replace('-yeartop=', '')
if k.startswith('-fund='):
fund = k.replace('-fund=','')
df = df[ df['fund'].str.contains(fund, na=False) ]
if k.startswith('-belongto='):
belongto = k.replace('-belongto=', '')
df_company = get_cn_fund_company()
company_managers = dict_from_df(df_company, 'company', 'managers')
company_funds = dict_from_df(df_company, 'company', 'funds')
group = '-f' not in options
if group and (df.shape[0] > 0):
df_tmp = df.drop(columns=['fund'])
table = []
name = ''
for i, row in df_tmp.iterrows():
c = row['company']
manager = c + row['name']
if name == manager:
continue
else:
name = manager
data = list(row.values)
managers = company_managers[c] if (c in company_managers) else 0
funds = company_funds[c] if (c in company_funds) else 0
data.insert(2, managers)
data.insert(3, funds)
table.append( data )
cols = list(row.keys())
cols.insert(2, 'managers')
cols.insert(3, 'funds')
df = pd.DataFrame(table, columns=cols)
df['annual'] = round((np.power((df['best_return'] * 0.01 + 1), 1.0/(np.maximum(365.0,df['days'])/365.0)) - 1.0) * 100.0, 1)
if yeartop:
df1 = df[ df['days'] >= 3650 ].sort_values(by='best_return', ascending=False)
if '%' in yeartop:
yeartopn = int(yeartop.replace('%','')) * df1.shape[0] // 100
else:
yeartopn = int(yeartop)
df1 = df1.head(yeartopn)
for i in range(9,0,-1):
df2 = df[ (df['days'] >= (i*365)) & (df['days'] < ((i+1))*365) ].sort_values(by='best_return', ascending=False)
if '%' in yeartop:
yeartopn = int(yeartop.replace('%','')) * df2.shape[0] // 100
else:
yeartopn = int(yeartop)
df2 = df2.head( yeartopn )
df1 = pd.concat([df1, df2], ignore_index=True)
df = df1
df.insert(5, 'years', round(df['days'] / 365.0, 1))
selected = total = df.shape[0]
df = filter_with_options(df, options)
if belongto:
if belongto.endswith('.csv'):
belongto = pd.read_csv(belongto, dtype= str)['company'].tolist()
elif ',' in belongto:
belongto = belongto.split(',')
else:
belongto = [ belongto ]
df = df[ df['company'].isin(belongto) ]
for k in options:
if k.startswith('-sortby='):
df = sort_with_options(df, options, by_default='best_return')
break
if limit > 0:
df = df.head(limit)
if 'fund' in df.columns:
fund_name_symbol = get_fund_name_symbol()
df.insert(2, 'symbol', [(fund_name_symbol[fund] if fund in fund_name_symbol else '') for fund in df['fund'].tolist()])
df = df[ df['symbol'] != '' ]
elif ('-s' in options) and ('name'in df.columns):
manager_fundsymbol = get_manager_fundsymbol_mapping()
managers = (df['company'] + df['name']).tolist()
df['symbol'] = [(','.join(manager_fundsymbol[manager]) if manager in manager_fundsymbol else '') for manager in managers]
elif ('-sd' in options) and ('name'in df.columns):
manager_fund = get_manager_fund_mapping()
managers = (df['company'] + df['name']).tolist()
df['fund'] = [('\n'.join(manager_fund[manager]) if manager in manager_fund else '') for manager in managers]
df['area'] = df['fund'].apply(get_fund_area)
df = df.reset_index(drop= True)
selected = df.shape[0]
print( tb.tabulate(df, headers='keys') )
print( selected, 'of', total, 'selected.')
out_csv_file, out_xls_file = csv_xlsx_from_options(options)
if out_csv_file:
df_csv = df[['name','company']]
df_csv.to_csv(out_csv_file, index= False)
print( tb.tabulate(df_csv, headers='keys') )
print('Exported to:', out_csv_file)
if out_xls_file:
if 'days' in df.columns:
df = df.drop(columns=['days'])
df = df.rename(columns= {
'managers': '基金经理人数',
'funds': '基金总数',
'fund': '基金',
'area': '投资方向',
'years': '管理年限',
'size': '基金规模',
'best_return': '最佳回报',
'annual': '年化收益',
})
df.to_excel(excel_writer= out_xls_file)
print( tb.tabulate(df, headers='keys') )
print('Exported to:', out_xls_file)
if '-plot' in options:
for i, row in df.iterrows():
manager = row['company'] + row['name']
cli_fund_show([manager], ['-png=output/' + manager + '.png'])
return df
def cli_fund_read_fund_symbols(excel_file):
if excel_file.endswith('.csv'):
df = pd.read_csv(excel_file, dtype=str)
elif excel_file.endswith('.xlsx'):
df = pd.read_excel(excel_file, dtype=str)
return df['symbol'].tolist() if ('symbol' in df) else []
# hiquant fund update <symbols>
# hiquant fund update <symbols.csv>
# hiquant fund update all -range=500-
def cli_fund_update(params, options):
if len(params) == 0:
cli_fund_help()
return
range_from, range_to = range_from_options(options)
df_fund_list = get_cn_fund_list(check_date= datetime_today())
if params[0] == 'all':
symbols = df_fund_list['symbol'].tolist()
else:
symbols = symbols_from_params(params)
df_fund_list = df_fund_list[ df_fund_list['symbol'].isin(symbols) ]
fund_symbol_names = dict_from_df(df_fund_list, 'symbol', 'name')
i = 0
n = len(symbols)
for symbol in symbols:
i += 1
if range_from > 0 and i < range_from:
continue
if range_to > 0 and i > range_to:
break
name = symbol + ' - ' + fund_symbol_names[ symbol ]
print('{}/{} - updating {} ...'.format(i, n, name))
try:
df = get_cn_fund_daily(symbol= symbol, check_date= datetime_today())
except (KeyError, ValueError, IndexError) as err:
print('error downloading', name)
pass
print('Done.')
def eval_fund_list(df_fund_list, date_from, date_to, ignore_new = False):
fund_manager = get_fund_manager_mapping()
fund_company = get_fund_company_mapping()
manager_size = get_manager_size_mapping()
days = (date_to - date_from).days
eval_table = []
for index, row in df_fund_list.iterrows():
symbol = row['symbol']
name = row['name']
buy_state = row['buy_state']
sell_state = row['sell_state']
fee = row['fee']
print('\r', index, '-', symbol, '-', name, '...', end='', flush= True)
try:
df = get_cn_fund_daily(symbol= symbol)
except (KeyError, ValueError, IndexError) as err:
print('\nerror downloading', name, ', skip.')
continue
fund_start = min(df.index)
fund_days = (datetime_today() - fund_start).days
if ignore_new and (fund_start > date_from):
continue
df = df[ df.index >= date_from ]
df = df[ df.index < date_to ]
if df.shape[0] == 0:
continue
        # skip the fund if the data is not reasonable (any daily pct_change above 20%)
pct_change_max = df['pct_change'].max()
if pct_change_max > 20.0:
print('pct_change_max', pct_change_max)
continue
try:
df['earn'] = (df['pct_change'] * 0.01 +1).cumprod()
earn = df['earn'].iloc[-1] - 1.0
earn = round(earn * 100, 2)
earn_max = df['earn'].max() - 1.0
earn_max = round(earn_max * 100, 2)
except (KeyError, ValueError, IndexError) as err:
print('error calculating', symbol, name, ', skip.')
continue
risk_free_rate = 3.0 / 365
daily_sharpe_ratio = (df['pct_change'].mean() - risk_free_rate) / df['pct_change'].std()
sharpe_ratio = round(daily_sharpe_ratio * (252 ** 0.5), 2)
drawdown = df['earn'] / df['earn'].cummax() - 1.0
drawdown_max = round(100 * drawdown.min(), 2)
drawdown_now = round(100 * drawdown.iloc[-1], 2)
drawdown_percent = round(drawdown_now / drawdown_max * 100) if (drawdown_max < 0) else 100
logreturns = np.diff( np.log(df['earn']) )
volatility = np.std(logreturns)
annualVolatility = volatility * (252 ** 0.5)
annualVolatility = round(annualVolatility * 100, 2)
managers = fund_manager[name] if (name in fund_manager) else []
manager = managers[0] if len(managers) > 0 else ''
manager2 = managers[1] if len(managers) > 1 else ''
manager3 = managers[2] if len(managers) > 2 else ''
company = fund_company[name] if (name in fund_company) else ''
if name not in fund_manager:
print('name not in fund manager')
continue
key_manager = company + manager
size = manager_size[key_manager] if (key_manager in manager_size) else 0
eval_table.append([symbol, name, company, manager, manager2, manager3, size, min(days, fund_days), earn, earn_max, drawdown_now, drawdown_max, drawdown_percent, sharpe_ratio, buy_state, sell_state, fee, fund_start, round(fund_days/365.0,1)])
en_cols = ['symbol', 'name', 'company', 'manager', 'manager2', 'manager3', 'size', 'calc_days', 'earn', 'earn_max', 'drawdown', 'drawdown_max', 'drawdown_pct', 'sharpe', 'buy_state', 'sell_state', 'fee', 'fund_start', 'fund_years']
df = pd.DataFrame(eval_table, columns=en_cols)
df['annual'] = round((np.power((df['earn'] * 0.01 + 1), 1.0/(df['calc_days']/365.0)) - 1.0) * 100.0, 1)
#df['annual'] = df[['earn', 'annual']].min(axis= 1)
df['score'] = round(df['earn'] * df['sharpe'] * 0.1, 1)
df['score2'] = round(- df['earn'] * df['sharpe'] / df['drawdown_max'], 1)
return df
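# Hedged notes on the metrics above: 'sharpe' annualises the daily Sharpe ratio with
# sqrt(252) against a flat 3%/365 daily risk-free rate; 'annual' turns the cumulative
# 'earn' into a compound annual rate over 'calc_days'; 'score' = earn * sharpe / 10 and
# 'score2' = -earn * sharpe / drawdown_max are ad-hoc ranking scores.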
# hiquant fund eval 002943 005669
# hiquant fund eval 002943 005669 -days=365
# hiquant fund eval data/myfunds.csv -days=365
# hiquant fund eval all -days=365 -sortby=sharpe -desc
def cli_fund_eval(params, options):
if len(params) == 0:
cli_fund_help()
return
yeartop = 0
manager_out_csv = ''
managedby = ''
belongto = ''
myfunds = ''
for k in options:
if k.startswith('-yeartop='):
yeartop = int(k.replace('-yeartop=', ''))
if k.startswith('-manager_out=') and k.endswith('.csv'):
manager_out_csv = k.replace('-manager_out=', '')
if k.startswith('-managedby='):
managedby = k.replace('-managedby=', '')
if k.startswith('-belongto='):
belongto = k.replace('-belongto=', '')
if k.startswith('-myfunds='):
myfunds = k.replace('-myfunds=', '')
df_fund_list = get_cn_fund_list()
if myfunds:
params.append( myfunds )
if params[0] == 'all':
pass
else:
symbols = symbols_from_params(params)
df_fund_list = df_fund_list[ df_fund_list['symbol'].isin(symbols) ]
date_from, date_to = date_range_from_options(options)
range_from, range_to = range_from_options(options)
if '-nc' in options:
df_fund_list = df_fund_list[ df_fund_list['buy_state'].isin(['限大额','开放申购']) ]
for k in ['C','E','持有']:
df_fund_list = df_fund_list[ ~ df_fund_list['name'].str.contains(k) ]
for k in ['债','油','黄金','商品','资源','周期','通胀','全球','美元','美汇','美钞','美国','香港','恒生','海外','亚太','亚洲','四国','QDII','纳斯达克','标普']:
df_fund_list = df_fund_list[ ~ df_fund_list['name'].str.contains(k) ]
for k in ['ETF','指数','联接','中证']:
df_fund_list = df_fund_list[ ~ df_fund_list['name'].str.contains(k) ]
if yeartop > 0:
symbols = []
days = (date_to - date_from).days
for i in range(0, days, 365):
print('\ryear', i // 365)
eval_from = date_from + dt.timedelta(days = i)
eval_to = min(eval_from + dt.timedelta(days= 365), date_to)
df_eval = eval_fund_list(df_fund_list, date_from= eval_from, date_to= eval_to)
df_eval = sort_with_options(df_eval, options, by_default='earn')
if yeartop > 0:
df_eval = df_eval.head(yeartop)
symbols += df_eval['symbol'].tolist()
df_fund_list = df_fund_list[ df_fund_list['symbol'].isin(set(symbols)) ]
df_eval = eval_fund_list(df_fund_list, date_from= date_from, date_to= date_to)
if myfunds:
if myfunds.endswith('.csv'):
df_myfunds = pd.read_csv(myfunds, dtype=str)
elif myfunds.endswith('.xlsx'):
df_myfunds = pd.read_excel(myfunds, dtype=str)
df_eval['total'] = 0
for i in range(10):
k = 'account' + str(i)
if k in df_myfunds.columns:
symbol_account = dict_from_df(df_myfunds, 'symbol', k)
df_eval[k] = np.nan
for i, row in df_eval.iterrows():
symbol = row['symbol']
if symbol in symbol_account:
df_eval[k].iloc[i] = float(symbol_account[symbol])
df_eval[k] = df_eval[k].fillna(0)
df_eval['total'] += df_eval[k]
if managedby:
if managedby.endswith('.csv') or managedby.endswith('.xlsx'):
df_manager = pd.read_csv(managedby, dtype= str) if managedby.endswith('.csv') else | pd.read_excel(managedby, dtype=str) | pandas.read_excel |
import pandas
from pandas.util.testing import assert_frame_equal
from unittest import TestCase
from unittest import main
from dxanalyze.dxdata.dataprocessing import calculate_percentile
from dxanalyze.dxdata.dataprocessing import set_max_y_axis
from dxanalyze.dxdata.dataprocessing import get_max_y_axis
from dxanalyze.dxdata.dataprocessing import generate_cache_hit_ratio
from dxanalyze.dxdata.dataprocessing import generate_cpu_summary
from dxanalyze.dxdata.dataprocessing import generate_network_summary
from dxanalyze.dxdata.dataprocessing import create_dataframes
class Test_datafile(TestCase):
def test_calculate_percentile(self):
csvdata = pandas.read_csv("tests/test-analytics-disk-raw.csv")
df = csvdata[["#timestamp","read_throughput"]]
pct = calculate_percentile(0.95, df, "read_throughput")
self.assertEqual(pct, 42.87)
def test_create_dataframes_cpu(self):
datadict = {
"#timestamp" : [ "2019-03-20 11:55:00", "2019-03-20 11:56:00", "2019-03-20 11:57:00",
"2019-03-21 11:58:00", "2019-03-21 11:59:00", "2019-03-21 12:00:00" ],
"util": [ 25.81, 26.29, 24.89, 25.57, 34.68, 49.87]
}
df = pandas.DataFrame(datadict)
series_list = create_dataframes('cpu', df)
assert_frame_equal(series_list[0]["utilization"]["util"], df)
def test_create_dataframes_nfs(self):
nfsio = {
"#timestamp" : [ "2019-03-20 11:55:00", "2019-03-20 11:56:00", "2019-03-20 11:57:00",
"2019-03-20 11:58:00", "2019-03-20 11:59:00", "2019-03-20 12:00:00" ],
"read_throughput": [ 20, 80, 80, 90, 45, 10],
"write_throughput": [ 2, 8, 8, 9, 4, 1],
"ops_read": [1000, 2000, 3000, 1000, 2000, 3000],
"ops_write": [100, 200, 300, 100, 200, 300],
"read_latency": [2, 3, 4, 4, 3, 2],
"write_latency": [1, 3, 6, 6, 3, 1]
}
df = pandas.DataFrame(nfsio)
series_list = create_dataframes('nfs', df)
for s in series_list:
if "throughput" in s:
assert_frame_equal(s["throughput"]["read_throughput"], df[["#timestamp", "read_throughput"]])
assert_frame_equal(s["throughput"]["write_throughput"], df[["#timestamp", "write_throughput"]])
if "latency" in s:
assert_frame_equal(s["latency"]["read_latency"], df[["#timestamp", "read_latency"]])
assert_frame_equal(s["latency"]["write_latency"], df[["#timestamp", "write_latency"]])
if "ops" in s:
assert_frame_equal(s["ops"]["ops_read"], df[["#timestamp", "ops_read"]])
assert_frame_equal(s["ops"]["ops_write"], df[["#timestamp", "ops_write"]])
def test_generate_cpu_summary(self):
datadict = {
"#timestamp" : [ "2019-03-20 11:55:00", "2019-03-20 11:56:00", "2019-03-20 11:57:00",
"2019-03-21 11:58:00", "2019-03-21 11:59:00", "2019-03-21 12:00:00" ],
"util": [ 25.81, 26.29, 24.89, 25.57, 34.68, 49.87]
}
result_dict = {
"min": {
"#timestamp": [737138.0, 737139.0],
"min": [24.89, 25.57]
},
"max": {
"#timestamp": [737138.0, 737139.0],
"max": [26.29, 49.87]
},
"85percentile": {
"#timestamp": [737138.0, 737139.0],
"85percentile": [26.146, 45.313]
},
}
df = pandas.DataFrame(datadict)
series_dict = generate_cpu_summary(df)
for s in ["min", "max", "85percentile"]:
series = series_dict[s].to_frame()
series = series.reset_index()
series = series.rename(columns={0:s})
df_min = pandas.DataFrame(result_dict[s])
assert_frame_equal(df_min, series)
def test_generate_network_summary(self):
inbytes = {
"#timestamp" : [ "2019-03-20 11:55:00", "2019-03-20 11:56:00", "2019-03-20 11:57:00",
"2019-03-21 11:58:00", "2019-03-21 11:59:00", "2019-03-21 12:00:00" ],
"inBytes": [ 10, 20, 30, 60, 40, 20]
}
outbytes = {
"#timestamp" : [ "2019-03-20 11:55:00", "2019-03-20 11:56:00", "2019-03-20 11:57:00",
"2019-03-21 11:58:00", "2019-03-21 11:59:00", "2019-03-21 12:00:00" ],
"outBytes": [ 15, 25, 35, 65, 45, 25]
}
indf = pandas.DataFrame(inbytes)
outdf = | pandas.DataFrame(outbytes) | pandas.DataFrame |
"""
Functions for converting object to other types
"""
import numpy as np
import pandas as pd
from pandas.core.common import (_possibly_cast_to_datetime, is_object_dtype,
isnull)
import pandas.lib as lib
# TODO: Remove in 0.18 or 2017, whichever is sooner
def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = _possibly_cast_to_datetime(values, 'M8[ns]',
errors='coerce')
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.tseries.timedeltas import to_timedelta
new_values = to_timedelta(values, coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
except:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
values = values.copy() if copy else values
return values
def _soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
coerce=False, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError('At least one of datetime, numeric or timedelta must '
'be True.')
elif conversion_count > 1 and coerce:
raise ValueError("Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True.")
if isinstance(values, (list, tuple)):
# List or scalar
values = np.array(values, dtype=np.object_)
elif not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
elif not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
# If 1 flag is coerce, ensure 2 others are False
if coerce:
# Immediate return if coerce
if datetime:
return pd.to_datetime(values, errors='coerce', box=False)
elif timedelta:
return pd.to_timedelta(values, errors='coerce', box=False)
elif numeric:
return pd.to_numeric(values, errors='coerce')
# Soft conversions
if datetime:
values = lib.maybe_convert_objects(values, convert_datetime=datetime)
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=timedelta)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
            # If all NaNs, then do not alter
values = converted if not | isnull(converted) | pandas.core.common.isnull |
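# Hedged usage sketch for the pandas-internal helper defined above (values illustrative):
# vals = np.array(['1.5', '2', '3'], dtype=object)
# _soft_convert_objects(vals, datetime=False, timedelta=False, numeric=True)
# # -> float64 array([1.5, 2.0, 3.0]); with coerce=True (and only numeric True) the call
# #    returns pd.to_numeric(values, errors='coerce') instead.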
import pandas as pd
import numpy as np
data_c2 = | pd.read_csv("AllRollsWildemount.csv") | pandas.read_csv |
"""
kissim.io.biopython
Defines a Biopython-based pocket class.
"""
import collections
import logging
import warnings
import pandas as pd
from Bio.PDB import HSExposure, Vector, Entity
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from opencadd.io import Biopython
from opencadd.structure.pocket import PocketBase
from .data import KlifsToKissimData
from ..definitions import (
STANDARD_AMINO_ACIDS,
NON_STANDARD_AMINO_ACID_CONVERSION,
SIDE_CHAIN_REPRESENTATIVE,
)
from ..utils import enter_temp_directory
logger = logging.getLogger(__name__)
warnings.simplefilter("ignore", PDBConstructionWarning)
class PocketBioPython(PocketBase):
"""
Class defining the Biopython-based pocket object.
Attributes
----------
name : str
Name of protein.
_residue_ids : list of int
Pocket residue IDs.
_residue_ixs : list of int
Pocket residue indices.
_data_complex : Bio.PDB.Chain.Chain
Structural data for the full complex (not the pocket only).
_hse_ca_complex : Bio.PDB.HSExposure.HSExposureCA
CA exposures for the full complex (not the pocket only).
_hse_cb_complex : Bio.PDB.HSExposure.HSExposureCB
CB exposures for the full complex (not the pocket only).
Properties
----------
center
ca_atoms
pcb_atoms
side_chain_representatives
hse_ca
hse_cb
"""
def __init__(self):
self.name = None
self._residue_ids = None
self._residue_ixs = None
self._data_complex = None
self._hse_ca_complex = None
self._hse_cb_complex = None
@classmethod
def from_structure_klifs_id(cls, structure_klifs_id, klifs_session=None):
"""
Get Biopython-based pocket object from a KLIFS structure ID.
Parameters
----------
        structure_klifs_id : int
KLIFS structure ID.
klifs_session : None or opencadd.databases.klifs.session.Session
Local or remote KLIFS session. If None, generate new remote session.
Returns
-------
kissim.io.PocketBioPython or None
Biopython-based pocket object.
"""
data = KlifsToKissimData.from_structure_klifs_id(structure_klifs_id, klifs_session)
if data:
pocket = cls.from_text(
data.text, data.extension, data.residue_ids, data.residue_ixs, structure_klifs_id
)
return pocket
else:
return None
@classmethod
def from_text(cls, text, extension, residue_ids, residue_ixs, name):
"""
Get Biopython-based pocket object from text, pocket residue IDs and indices.
Parameters
----------
text : str
Structural complex data as string (file content).
extension : str
Structural complex data format (file extension).
residue_ids : list of int
Pocket residue IDs.
residue_ixs : list of int
Pocket residue indices.
name : str
Structure name.
Returns
-------
kissim.io.PocketBioPython
Biopython-based pocket object.
"""
with enter_temp_directory():
filename = "complex.pdb"
with open(filename, "w") as f:
f.write(text)
# Get biopython Structure object
structure = Biopython.from_file(filename)
# KLIFS PDB files contain only one model and one chain - get their IDs
model_id = next(structure.get_models()).id
chain_id = next(structure.get_chains()).id
# Get biopython Chain object
chain = structure[model_id][chain_id]
pocket = cls()
pocket.name = name
pocket._data_complex = chain
pocket._residue_ids, pocket._residue_ixs = residue_ids, residue_ixs
try:
pocket._hse_ca_complex = HSExposure.HSExposureCA(pocket._data_complex)
pocket._hse_cb_complex = HSExposure.HSExposureCB(pocket._data_complex)
except AttributeError as e:
logger.error(
f"{pocket.name}: Bio.PDB.Exposure could not be calculated "
f"(AttributeError: {e})"
)
if e.args[0] == "'NoneType' object has no attribute 'norm'":
# If HSE cannot be calculated with this error message,
# it is most likely related to
# https://github.com/volkamerlab/kissim/issues/27
# Return None for this pocket, with will result in a None fingerprint
pocket = None
else:
# Other errors shall be raised!!!
raise AttributeError(f"{e}")
return pocket
@property
def center(self):
"""
Pocket centroid.
Returns
-------
Bio.PDB.vectors.Vector
Coordinates for the pocket centroid.
"""
ca_atoms = self.ca_atoms
ca_atom_vectors = ca_atoms["ca.atom"].to_list()
ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]
centroid = self.center_of_mass(ca_atom_vectors, geometric=False)
centroid = Vector(centroid)
return centroid
@property
def ca_atoms(self):
"""
Pocket CA atoms.
Returns
-------
pandas.DataFrame
Pocket CA atoms (rows) with the following columns:
- "residue.id": Residue ID
- "ca.atom": CA atom (Bio.PDB.Atom.Atom)
- "ca.vector": CA atom vector (Bio.PDB.vectors.Vector)
"""
ca_atoms = []
for residue_id in self._residue_ids:
ca_atom = self._ca_atom(residue_id)
ca_atoms.append([residue_id, ca_atom])
ca_atoms = pd.DataFrame(ca_atoms, columns=["residue.id", "ca.atom"])
# Add vectors
ca_atom_vectors = []
for ca_atom in ca_atoms["ca.atom"]:
try:
ca_atom_vectors.append(ca_atom.get_vector())
except AttributeError:
ca_atom_vectors.append(None)
ca_atoms["ca.vector"] = ca_atom_vectors
return ca_atoms.astype({"residue.id": "Int32"})
@property
def pcb_atoms(self):
"""
Pocket pseudo-CB atoms.
Returns
-------
pandas.DataFrame
Pocket pseudo-CB atoms (rows) with the following columns:
- "residue.id": Residue ID
- "pcb.vector": Pseudo-CB atom vector (Bio.PDB.vectors.Vector)
"""
pcb_atoms = []
for residue_id in self._residue_ids:
pcb_atom = self._pcb_atom(residue_id)
pcb_atoms.append([residue_id, pcb_atom])
pcb_atoms = | pd.DataFrame(pcb_atoms, columns=["residue.id", "pcb.vector"]) | pandas.DataFrame |
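# Hedged usage sketch for the pocket class above (the KLIFS structure ID is illustrative):
# pocket = PocketBioPython.from_structure_klifs_id(12347)
# if pocket is not None:
#     pocket.ca_atoms.head()   # per-residue CA atom and vector
#     pocket.center            # Bio.PDB Vector at the pocket centroid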
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.ticker as ticker
from collections import defaultdict
import statsmodels.formula.api as sm
from mreader import MeasurementReader
import log_constants as c
class MeasurementPlotter(object):
"""docstring for MeasurementPlotter"""
def __init__(self):
self.colors = ['#ffad01', '#042e60', '#11875d', '#7a5901', '#f7022a',
'#29e8e8', '#ff7fa7', '#800ed1', '#51b73b', '#730039', '#63b365']
self.color_i = 0
self.readers = []
self.max_values = []
self.corr = []
self.categorized_corr = defaultdict(list)
self.categorized_values = defaultdict(list)
self.H = defaultdict(list)
self.H['cat'] = defaultdict(list)
def plot_run_results(self, i, folder):
''' Plot graph for time per k'''
self.fig_runtime = plt.figure(1, figsize=(16, 12), dpi=80)
plt.figure(1)
for xi, yi in zip(self.measured_k, self.y):
# plot actual data points when interpolation)
plt.plot(xi, yi, marker='x',
color=self.colors[1], figure=self.fig_runtime)
# interpolate and plot
# Graph 1
# graph info
plot_header = "Time per k for {} - {}, oracle_type {}, L ={}, max_iteration_count={}, algorithm version ={}".format(
self.info[c.DATASET], self.info[c.TOPIC], self.info[c.ORACLE_TYPE], self.info[c.SUMMARY_LEN], self.info[c.ITERATIONS], self.info[c.VERSION])
plt.title(plot_header)
plt.xlabel('k')
plt.ylabel('time in seconds')
plt.plot(self.measured_k, self.y, marker='x', color=self.colors[1 + i],
figure=self.fig_runtime, label=self.info[c.VERSION]) # uninterpolated
plt.grid()
plt.legend(title='version')
self.fig_runtime.savefig(
folder + '/' + self.info[c.TOPIC] + '-runtime' + '.png')
return
def plot_run_results_quality(self, folder):
self.fig_runtime = plt.figure(1, figsize=(16, 12), dpi=80)
plt.figure(1)
x = self.readers[-1].run_log['k']
y = self.readers[-1].run_log['r2']
plt.plot(x, y, marker='x', figure=self.fig_runtime)
self.fig_runtime.savefig(folder + '-runquality' + '.png')
plt.clf()
def plot_iteration_results(self, k, data=None, plot_iteration=False, marker='.',
linestyle='-', corpus_size=0, cut=None):
'''Plot graph for Quality per time; one graph per k'''
x = []
t = 0.0
if data is None:
data = self.readers[-1].iteration_log
for i, ti in enumerate(data[k]['t']):
t += ti
if plot_iteration:
x.append(i + 1)
else:
x.append(t)
x = x[:cut]
y = data[k]['r2'][:cut]
self.fig_quality = plt.figure(2, figsize=(10, 6), dpi=80)
plt.figure(2)
color = self.colors[self.color_i]
# plt.axvline(x=300, color='salmon')
# # # # #
# Plot graph for quality/time per k
# graph info
# plt.title("R2-Wert pro Zeit für Datensatz:" +
# self.info[c.DATASET] + "; Topic: " + self.info[c.TOPIC] +
# "; Referenzzusammenfassung: " + self.readers[-1].run_log['model_id'][0])
if plot_iteration:
plt.xlabel('Iteration t')
else:
plt.xlabel('Zeit in s')
plt.ylabel('Qualität in ROUGE-2')
plt.grid(True, which="both")
if k == 'original':
k = 'Basis'
plt.plot(x, y, marker=marker, color=color, linestyle=linestyle,
figure=self.fig_quality, label=k) # uninterpolated
# needs to be called last
plt.legend(title='k')
self.color_i = (self.color_i + 1) % len(self.colors)
pass
def plot_qt_ratio_per_k(self, folder):
self.fig_ratio = plt.figure(3, figsize=(16, 12), dpi=80)
plt.figure(3)
x = [int(k) for k in self.measured_k]
y = []
threshold = 0.1
for k in self.measured_k:
val = 0.0
yk = self.iterations_data[k]['r2']
for i in range(0, len(yk)):
if yk[i] > threshold:
val = yk[i]
break
y.append(val)
plot_header = "Q/t per k for {} - {}, L ={}, max_iteration_count={}, algorithm version ={}".format(
self.info[c.DATASET], self.info[c.TOPIC], self.info[c.SUMMARY_LEN], self.info[c.ITERATIONS], self.info[c.VERSION])
plt.title(plot_header)
plt.xlabel(self.info[c.VERSION])
plt.ylabel("Quality / time")
plt.plot(x, y, marker='x', color=self.colors[1])
self.fig_ratio.savefig(
folder + '/' + self.info[c.TOPIC] + '-qt-ratio' + '.png')
    def plot_number_of_iterations_per_k(self, folder):
x = [int(k) for k in self.measured_k]
y = []
for k in self.measured_k:
y.append(len(self.iterations_data[k]['t']))
self.fig_num_it = plt.figure(5, figsize=(16, 12), dpi=80)
plt.figure(5)
plt.title("Number of iterations per k for " +
self.info[c.DATASET] + " " + self.info[c.TOPIC])
plt.xlabel('k')
plt.ylabel('# of iterations')
plt.grid(True, which="both")
plt.plot(x, y, marker='x', color=self.colors[self.color_i],
figure=self.fig_num_it, label=k)
self.fig_num_it.savefig(
folder + '/' + self.info[c.TOPIC] + '-num_it' + '.png')
    def plot_summary_length_per_k(self, folder):
self.fig_sum_len = plt.figure(4, figsize=(16, 12), dpi=80)
x = [int(k) for k in self.measured_k]
y = self.summary_lengths
plt.figure(4)
plt.title("Resulting summary length per k for " +
self.info[c.DATASET] + " " + self.info[c.TOPIC])
plt.xlabel('k')
plt.ylabel('Summary length')
plt.grid(True, which="both")
plt.plot(x, y, marker='x', color=self.colors[self.color_i],
figure=self.fig_sum_len)
self.fig_sum_len.savefig(
folder + '/' + self.info[c.TOPIC] + '-sum_len' + '.png')
def plot_accept_reject_ratio(self):
plt.figure(6)
for k in self.measured_k:
df = pd.DataFrame(data=self.iterations_data[k])
df['ratio'] = df['accepts'] / (df['accepts'] + df['rejects'])
df['ratio'].fillna(0, inplace=True)
x = self.iterations_data[k]['iteration']
y = df['ratio'].tolist()
plt.plot(x, y, label=k)
plt.grid(True, which="both")
plt.legend(title='k')
plt.show()
# x = [self.iterations_data]
pass
def plot_upperbound(self, r2score, color='salmon'):
plt.figure(2)
plt.axhline(y=r2score, color=color)
return
def plot_vanilla_timebound(self):
plt.figure(1)
plt.axhline(y=self.timebound, color='salmon')
def save_it_plots(self, folder):
'''Save plot as png'''
self.fig_quality.savefig(
folder + self.readers[-1].run_log['model_id'][0] + '-q-per-it' + '.png')
self.color_i = 0
def boxplot_number_of_iterations(self, folder):
df = pd.DataFrame(data=self.readers[-1].aggregated_data)
sns.set(style="ticks")
# Select * from where
# print(df.loc[df['k'] == 100.0])
# df = df.loc[~df['k'].isin([200.0, 300.0, 500.0, 750.0])]
# print(df.loc[df['avg_time_per_iteration'] > 6])
# Initialize the figure with a logarithmic x axis
f, ax = plt.subplots(figsize=(12, 8))
# ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
# ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
sns.boxplot(x="number_of_iterations", y="k", data=df,
palette="vlag", orient='h')
# Add in points to show each observation
sns.swarmplot(x="number_of_iterations", y="k", data=df,
size=2, color=".3", linewidth=0, orient='h')
sns.set(color_codes=True)
lmp = sns.lmplot(x="k", y="number_of_iterations", data=df, order=2, ci=None, scatter_kws={"s": 2}, size=9)
lmp.set_axis_labels("k", "number_of_iterations")
lmp.set(xticks=range(0, 1001, 100))
# sns.distplot(df.loc[df['k'] == 500.0]['avg_time_per_iteration'])
# Tweak the visual presentation
ax.xaxis.grid(True)
ax.set(ylabel="k")
sns.despine(trim=True, left=True)
# plt.show()
f.savefig(folder + "boxplot number of iterations.png")
lmp.savefig(folder + "lmplot number of iterations.png")
def boxplot_avg_time_per_iteration(self, folder):
df = pd.DataFrame(data=self.readers[-1].aggregated_data)
sns.set(style="ticks")
f, ax = plt.subplots(figsize=(12, 8))
# ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
# ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
ax.set_xscale("log")
sns.boxplot(x="avg_time_per_iteration", y="k", data=df,
palette="husl", orient='h')
# Add in points to show each observation
sns.swarmplot(x="avg_time_per_iteration", y="k", data=df,
size=2, color=".3", linewidth=0, orient='h')
sns.set(color_codes=True)
lmp = sns.lmplot(x="k", y="avg_time_per_iteration", data=df, order=3, ci=None, scatter_kws={"s": 2}, size=5)
# lmp.set(xticks=range(0, 1050,100))
# Tweak the visual presentation
ax.xaxis.grid(True)
ax.set(ylabel="average time per iteration")
sns.despine(trim=True, left=True)
# plt.show()
f.savefig(folder + "boxplot avg time per iterations.png")
        lmp.savefig(folder + "lmplot avg time per iterations.png")
        plt.close(f)
def time_per_constraints(self, folder):
# df = pd.DataFrame(data=self.readers[-1].aggregated_iteration_data)
df = self.get_aggregate_data()
# Data points only
point_size = 0.6
# with regression
point_size = 0.4
sns.set(color_codes=True)
# gr = sns.lmplot(x="constraints", y="t", data=df, ci=None, scatter_kws={"s": point_size}, size=9)
# gr = sns.lmplot(x="constraints", y="t", data=df, order=2,
# ci=None, scatter_kws={"s": point_size}, size=5, truncate=True)
# # ci=None, scatter_kws={"s": point_size, 'color': 'blue'}, size=9, truncate=True)
# Data points only
gr = sns.lmplot(x="constraints", y="t", data=df, order=1, fit_reg=False, ci=None, scatter_kws={"s": point_size}, size=6, truncate=True)
max_tick = df['t'].max() + 1
gr.set(yticks=np.arange(0, max_tick, 1))
        gr.set_axis_labels("Number of ILP constraints", "Time per iteration")
# sns.residplot(x="constraints", y="t", data=df, order=1, scatter_kws={"s": point_size})
gr.savefig(folder + "time per constraints.png")
plt.show()
def regression(self, folder):
df = pd.DataFrame()
for reader in self.readers:
            tmp = pd.DataFrame(data=reader.aggregated_iteration_data)
            df = pd.concat([df, tmp], ignore_index=True)
# Import libraries
import os.path
import os
import datetime
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from gensim.models import Word2Vec
from pandas import read_csv
import re
import pandas as pd
import numpy as np
import itertools
import sys
def get_dict(file): # Return the dict mapping protein IDs to sequences, or molecule IDs to fingerprints, depending on the input file
if file=='df_molecule.csv':
fig_dict=pd.read_csv(file)[['Molecule_ID','Fingerprint']].T.to_dict('series')
elif file=='df_protein_train.csv' or file=='df_protein_test.csv' :
        pro=open(file,'r').read().upper() # convert lowercase letters in the protein sequence file to uppercase
pro_out=open(file,'w')
pro_out.write(pro)
pro_out.close()
fig_dict=pd.read_csv(file)[['PROTEIN_ID','SEQUENCE']].T.to_dict('series')
else:
        print('File format error')
sys.exit()
return fig_dict
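# Rough usage sketch (not part of the original pipeline; added for illustration). Note that
# the keys of the returned dict are the CSV row indices and each value is a two-element
# pandas Series holding the ID and the fingerprint / sequence:
#   mol_dict = get_dict('df_molecule.csv')        # {0: Series([Molecule_ID, Fingerprint]), ...}
#   pro_dict = get_dict('df_protein_train.csv')   # {0: Series([PROTEIN_ID, SEQUENCE]), ...}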
def get_new_pro(id_pro, pramate_file): # Return the numeric feature matrix computed from each protein sequence
pro_result={}
for key,valuex in id_pro.items():
value=list(valuex)[-1]
length=len(value)
pro_mol={'G':75.07,'A':89.09,'V':117.15,'L':131.17,'I':131.17,'F':165.19,'W':204.23,'Y':181.19,'D':133.10,'N':132.12,'E':147.13,'K':146.19,'Q':146.15,'M':149.21,'S':105.09,'T':119.12,'C':121.16,'P':115.13,'H':155.16,'R':174.20}
pramate_file_dict = pd.read_csv(pramate_file, index_col='aa').T.to_dict('series')
pro_n_8_maxitic=np.array([pramate_file_dict[value[0]],pramate_file_dict[value[1]]])
pro_line=np.array([pro_mol[value[0]],pro_mol[value[1]]])
for i in value[2:]:
            pro_n_8_maxitic=np.row_stack((pro_n_8_maxitic,pramate_file_dict[i])) # build the n x attribute computation matrix
pro_line= np.append(pro_line,pro_mol[i])
Lag=list(np.dot(pro_line,pro_n_8_maxitic)/float(length))
Lag=[ str(i) for i in Lag ]
pro_result[str(key)] =str(key)+','+','.join(Lag)
return pro_result
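# Sketch of what get_new_pro computes (illustration only): for a protein of length n, let A
# be the n x m per-residue attribute matrix read from pramate_file (m = number of attribute
# columns, apparently 14 for the aa2.csv used below, matching the Protein_0..Protein_13
# columns created later) and w the length-n vector of residue molecular weights from pro_mol.
# The per-protein descriptor is
#   Lag = (w . A) / n
# i.e. the average over residues of (molecular weight x attribute value), one entry per
# attribute column.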
def get_AC_figuer(file_fig_dict): # Return the numeric matrix computed from each molecular fingerprint
fig = []
for i in itertools.product('01', repeat=8):
fig.append(''.join(list(i)))
out={}
for k, vx in file_fig_dict.items():
fig_nu_dict = {}
v=''.join([ str(i) for i in list(vx)[1:] ]).replace(', ','')
s = 0
e = 8
for ii in range(len(v) - 7):
read = v[s:e]
if read in fig_nu_dict:
fig_nu_dict[read] = fig_nu_dict[read] + 1
else:
fig_nu_dict[read] = 1
s = s + 1
e = e + 1
fig_list=[]
for i in fig:
if i in fig_nu_dict:
fig_list.append(str(fig_nu_dict[i]))
else:
fig_list.append('0')
out[str(k)]=str(k)+','+','.join(fig_list)
return out
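# How the fingerprint encoding above works (toy example, illustration only): the fingerprint
# bit string is scanned with a sliding 8-bit window and the occurrences of each of the 256
# possible 8-bit patterns are counted, giving a fixed-length feature vector.
#   '0101100101' -> windows '01011001', '10110010', '01100101'
#   -> those three pattern counters become 1, the remaining 253 stay 0.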
def merge_file(new_fig,new_pro,pro_mol_id_file,out_file): # Merge the protein sequence matrix, the molecular fingerprint matrix and the 18 small-molecule properties
df=pd.read_csv(pro_mol_id_file)
new_pro=pd.read_csv('new_pro.list',sep='\t')
new_fig=pd.read_csv('new_fig.list',sep='\t')
nu_18=pd.read_csv('df_molecule.csv')[['Molecule_ID','cyp_3a4','cyp_2c9','cyp_2d6','ames_toxicity','fathead_minnow_toxicity','tetrahymena_pyriformis_toxicity','honey_bee','cell_permeability','logP','renal_organic_cation_transporter','CLtotal','hia','biodegradation','Vdd','p_glycoprotein_inhibition','NOAEL','solubility','bbb']]
df['Protein_ID']=df['Protein_ID'].astype(int)
result=pd.merge(new_pro,df,on='Protein_ID')
result=pd.merge(new_fig, result, on='Molecule_ID')
result=pd.merge(nu_18, result, on='Molecule_ID')
del result['Molecule_ID']
del result['Protein_ID']
result.to_csv(out_file,header=True,index=False)
def pro_mol_result(df_protein,df_molecule,df_affinity,df_out): # Call the functions above to generate the final analysis matrix
new_fig=pd.DataFrame([get_AC_figuer(get_dict(df_molecule))]).T[0].str.split(',', expand=True)
new_fig.columns = ['Molecule_ID'] + ['Molecule_%s'%i for i in range(256)]
new_fig.to_csv('new_fig.list',sep='\t',index=False)
new_pro=pd.DataFrame([get_new_pro(get_dict(df_protein),'aa2.csv')]).T[0].str.split(',', expand=True)
new_pro.columns = ['Protein_ID'] + ['Protein_%s'%i for i in range(14)]
new_pro.to_csv('new_pro.list',sep='\t',index=False)
merge_file(new_fig,new_pro,df_affinity,df_out)
os.remove('new_fig.list')
os.remove('new_pro.list')
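# Typical invocation for the training split (a sketch; the affinity file name is an
# assumption -- any CSV with the Protein_ID / Molecule_ID columns expected by merge_file works):
#   pro_mol_result('df_protein_train.csv', 'df_molecule.csv', 'df_affinity_train.csv', 'df_train.csv')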
"蛋白质参数14,单次最好成绩1.31"
def protein_14():
    print('------------------------------------- 14 protein features, best single-run score 1.31 -----------------------------------------------------')
    dataset = pd.read_csv('df_train.csv')
import numpy as np
import pandas as pd
# Auxiliary functions
def get_dummies(data):
data = data.copy()
if isinstance(data, pd.Series):
        data = pd.factorize(data)
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import DateTruncation
from cape_privacy.pandas.transformations import NumericRounding
def _make_apply_numeric_rounding(input, expected_output, ctype, dtype):
transform = NumericRounding(dtype=ctype, precision=1)
df = pd.DataFrame({"amount": input}).astype(dtype)
expected = pd.DataFrame({"amount": expected_output}).astype(dtype)
df["amount"] = transform(df.amount)
return df, expected
def _make_apply_datetruncation(frequency, input_date, expected_date):
transform = DateTruncation(frequency=frequency)
df = pd.DataFrame({"date": [input_date]})
expected = pd.DataFrame({"date": [expected_date]})
df["date"] = transform(df.date)
return df, expected
def test_rounding_float32():
input = [10.8834, 4.21221]
expected_output = [10.9, 4.2]
df, expected = _make_apply_numeric_rounding(
input, expected_output, dtypes.Float, np.float32
)
pdt.assert_frame_equal(df, expected)
def test_rounding_float64():
input = [10.8834, 4.21221]
expected_output = [10.9, 4.2]
df, expected = _make_apply_numeric_rounding(
input, expected_output, dtypes.Double, np.float64
)
pdt.assert_frame_equal(df, expected)
def test_truncate_date_year():
input_date = datetime.date(year=2018, month=10, day=3)
expected_date = datetime.date(year=2018, month=1, day=1)
df, expected = _make_apply_datetruncation("YEAR", input_date, expected_date)
pdt.assert_frame_equal(df, expected)
def test_truncate_datetime_year():
input_date = pd.Timestamp(year=2018, month=10, day=3)
    expected_date = pd.Timestamp(year=2018, month=1, day=1)
    df, expected = _make_apply_datetruncation("YEAR", input_date, expected_date)
    pdt.assert_frame_equal(df, expected)
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
from dask.utils_test import hlg_layer
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if not SKIP_PYARROW_LE:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
fn, engine=write_engine, compute=False, write_metadata_file=False
).compute()
files = os.listdir(fn)
assert "_metadata" not in files
# Fastparquet doesn't currently handle a directory without "_metadata"
read_df = dd.read_parquet(
os.path.join(fn, "*.parquet"),
index=["a"],
engine=read_engine,
gather_statistics=True,
)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"),
engine=read_engine,
index="myindex", # Must specify index without _metadata
gather_statistics=True,
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_gather_statistics_false(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(
tmp_path,
engine=read_engine,
index=False,
gather_statistics=False,
)
assert_eq(ddf, ddf2, check_index=False, check_divisions=False)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
if write_engine == read_engine == "fastparquet" and os.name == "nt":
# fastparquet or dask is not normalizing filepaths correctly on
# windows.
pytest.skip("filepath bug.")
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted(
(
os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith("_metadata")
),
key=natural_sort_key,
)
ddf2 = dd.read_parquet(
files, engine=read_engine, index="myindex", gather_statistics=True
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_columns_auto_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# XFAIL, auto index selection no longer supported (for simplicity)
# ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=["x"], engine=read_engine), ddf[["x"]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=["x"], engine=read_engine, gather_statistics=False),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
@write_read_engines()
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
# ### Empty columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, index="myindex"), ddf[[]]
)
# No divisions
assert_eq(
dd.read_parquet(
fn, columns=[], engine=read_engine, index="myindex", gather_statistics=False
),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x"], engine=read_engine),
ddf[["x"]],
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x"],
engine=read_engine,
gather_statistics=False,
),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x", "y"], engine=read_engine),
ddf,
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x", "y"],
engine=read_engine,
gather_statistics=False,
),
ddf.clear_divisions(),
check_divisions=True,
)
def test_nonsense_column(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
with pytest.raises((ValueError, KeyError)):
dd.read_parquet(fn, columns=["nonesense"], engine=engine)
with pytest.raises((Exception, KeyError)):
dd.read_parquet(fn, columns=["nonesense"] + list(ddf.columns), engine=engine)
@write_read_engines()
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(
dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),
ddf2,
check_index=False,
check_divisions=True,
)
# Two columns, none as index
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["x", "y"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["x", "y"]],
check_index=False,
check_divisions=True,
)
# One column and one index, all as columns
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["myindex", "x"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["myindex", "x"]],
check_index=False,
check_divisions=True,
)
@write_read_engines()
def test_gather_statistics_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
df = dd.read_parquet(fn, engine=read_engine, index=False)
assert df.index.name is None
assert not df.known_divisions
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), "test.parquet")
index = pd.MultiIndex.from_arrays(
[np.arange(10), np.arange(10) + 1], names=["x0", "x1"]
)
df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"], index=index)
df2 = df.reset_index(drop=False)
if engine == "fastparquet":
fastparquet.write(fn, df.reset_index(), write_index=False)
else:
pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)
ddf = dd.read_parquet(fn, engine=engine, index=index.names)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns="a", engine=engine, index=index.names)
assert_eq(d, df["a"])
d = dd.read_parquet(fn, index=["a", "b"], columns=["x0", "x1"], engine=engine)
assert_eq(d, df2.set_index(["a", "b"])[["x0", "x1"]])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, columns=["b"], index=["a"], engine=engine)
assert_eq(d, df2.set_index("a")[["b"]])
d = dd.read_parquet(fn, columns=["a", "b"], index=["x0"], engine=engine)
assert_eq(d, df2.set_index("x0")[["a", "b"]])
# Just columns
d = dd.read_parquet(fn, columns=["x0", "a"], index=["x1"], engine=engine)
assert_eq(d, df2.set_index("x1")[["x0", "a"]])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=["x0", "b"], engine=engine)
assert_eq(d, df2[["x0", "b"]])
for index in ["x1", "b"]:
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
assert_eq(d, df2.set_index(index)[["x0", "a"]])
# Columns and index intersect
for index in ["a", "x0"]:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
# Series output
for ind, col, sol_df in [
("x1", "x0", df2.set_index("x1")),
(False, "b", df2),
(False, "x0", df2[["x0"]]),
("a", "x0", df2.set_index("a")[["x0"]]),
("a", "b", df2.set_index("a")),
]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines()
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=["x"], index="myindex", engine=engine)
assert_eq(ddf[["x"]], ddf2)
ddf2 = dd.read_parquet(fn, columns="x", index="myindex", engine=engine)
assert_eq(ddf.x, ddf2)
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert set(read(fn).dask) == set(read(fn).dask)
assert set(read(fn).dask) != set(read(fn, columns=["x"]).dask)
assert set(read(fn, columns=("x",)).dask) == set(read(fn, columns=["x"]).dask)
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join("test.parquet"))
dfp = df.copy()
dfp.index.name = "index"
dfp.to_parquet(
fn, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf = dd.read_parquet(fn, index="index", engine=read_engine)
assert_eq(dfp, ddf)
@write_read_engines()
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({"x": ["a", "b", "c"] * 100}, dtype="category")
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories="x", engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2 = dd.read_parquet(tmp, categories=["x"], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
# autocat
if read_engine == "fastparquet":
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2.loc[:1000].compute()
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x.compute()).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
ddf2.to_parquet(tmp, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp_path = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp_path, append=True, engine=engine)
ddf2.to_parquet(tmp_path, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir, engine):
tmp = str(tmpdir)
df0 = pd.DataFrame(
{
"lat": np.arange(0, 10, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(100, 110, dtype="int64"),
}
)
df0.index.name = "index"
df1 = pd.DataFrame(
{
"lat": np.arange(10, 20, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(120, 130, dtype="int64"),
}
)
df1.index.name = "index"
# Check that nullable dtypes work
# (see: https://github.com/dask/dask/issues/8373)
df0["lat"] = df0["lat"].astype("Int64")
df1["lat"].iloc[0] = np.nan
df1["lat"] = df1["lat"].astype("Int64")
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=["lon"], engine=engine)
dd.to_parquet(
dd_df1,
tmp,
partition_on=["lon"],
append=True,
ignore_divisions=True,
engine=engine,
)
out = dd.read_parquet(
tmp, engine=engine, index="index", gather_statistics=True
).compute()
# convert categorical to plain int just to pass assert
out["lon"] = out.lon.astype("int64")
# sort required since partitioning breaks index order
assert_eq(
out.sort_values("value"), pd.concat([df0, df1])[out.columns], check_index=False
)
def test_partition_on_cats(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
@PYARROW_MARK
@pytest.mark.parametrize("meta", [False, True])
@pytest.mark.parametrize("stats", [False, True])
def test_partition_on_cats_pyarrow(tmpdir, stats, meta):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine="pyarrow", write_metadata_file=meta)
df = dd.read_parquet(tmp, engine="pyarrow", gather_statistics=stats)
assert set(df.b.cat.categories) == {"x", "y", "z"}
def test_partition_on_cats_2(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b", "c"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
assert set(df.c.cat.categories) == {"x", "y", "z"}
df = dd.read_parquet(tmp, columns=["a", "c"], engine=engine)
assert set(df.c.cat.categories) == {"x", "y", "z"}
assert "b" not in df.columns
assert_eq(df, df.compute())
df = dd.read_parquet(tmp, index="c", engine=engine)
assert set(df.index.categories) == {"x", "y", "z"}
assert "c" not in df.columns
# series
df = dd.read_parquet(tmp, columns="b", engine=engine)
assert set(df.cat.categories) == {"x", "y", "z"}
def test_append_wo_index(tmpdir, engine):
"""Test append with write_index=False."""
tmp = str(tmpdir.join("tmp1.parquet"))
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
assert "Appended columns" in str(excinfo.value)
tmp = str(tmpdir.join("tmp2.parquet"))
ddf1.to_parquet(tmp, write_index=False, engine=engine)
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, index="f", engine=engine)
assert_eq(df.set_index("f"), ddf3)
def test_append_overlapping_divisions(tmpdir, engine):
"""Test raising of error when divisions overlapping."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended divisions" in str(excinfo.value)
ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir, engine):
"""Test raising of error when non equal columns."""
tmp = str(tmpdir)
df1 = pd.DataFrame({"i32": np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({"i64": np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({"i32": np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended columns" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, engine=engine, append=True)
assert "Appended dtypes" in str(excinfo.value)
def test_append_dict_column(tmpdir, engine):
# See: https://github.com/dask/dask/issues/7492
if engine == "fastparquet":
pytest.xfail("Fastparquet engine is missing dict-column support")
elif pa_version < parse_version("1.0.1"):
pytest.skip("PyArrow 1.0.1+ required for dict-column support.")
tmp = str(tmpdir)
dts = pd.date_range("2020-01-01", "2021-01-01")
df = pd.DataFrame(
{"value": [{"x": x} for x in range(len(dts))]},
index=dts,
)
ddf1 = dd.from_pandas(df, npartitions=1)
# Write ddf1 to tmp, and then append it again
ddf1.to_parquet(tmp, append=True, engine=engine)
ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)
# Read back all data (ddf1 + ddf1)
ddf2 = dd.read_parquet(tmp, engine=engine)
# Check computed result
expect = pd.concat([df, df])
result = ddf2.compute()
assert_eq(expect, result)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 20, 30], "c": [100, 200, 300]},
index=pd.Index([-1, -2, -3], name="myindex"),
columns=["c", "a", "b"],
)
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == "fastparquet":
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ["myindex", "c", "a", "b"]
ddf2 = dd.read_parquet(tmp, index="myindex", engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{"i32": np.arange(1000, dtype=np.int32), "f": np.arange(1000, dtype=np.float64)}
)
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp, engine=engine)
df2 = dd.read_parquet(tmp, columns=["i32", "f"], engine=engine)
assert_eq(df[["i32", "f"]], df2, check_index=False)
fns = glob.glob(os.path.join(tmp, "*.parquet"))
df2 = dd.read_parquet(fns, columns=["i32"], engine=engine).compute()
df2.sort_values("i32", inplace=True)
assert_eq(df[["i32"]], df2, check_index=False, check_divisions=False)
df3 = dd.read_parquet(tmp, columns=["f", "i32"], engine=engine)
assert_eq(df[["f", "i32"]], df3, check_index=False)
@pytest.mark.parametrize(
"df,write_kwargs,read_kwargs",
[
(pd.DataFrame({"x": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": ["c", "a", "b"]}), {}, {}),
(pd.DataFrame({"x": ["cc", "a", "bbb"]}), {}, {}),
(pd.DataFrame({"x": [b"a", b"b", b"c"]}), {"object_encoding": "bytes"}, {}),
(
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])}),
{},
{"categories": ["x"]},
),
(pd.DataFrame({"x": pd.Categorical([1, 2, 1])}), {}, {"categories": ["x"]}),
(pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"), {}, {}),
pytest.param(
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ns]"),
{},
{},
),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, UTC]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, CET]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("float32"), {}, {}),
(pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),
(pd.DataFrame({"x": [3, 1, 5]}, index=pd.Index([1, 2, 3], name="foo")), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]), {}, {}),
(pd.DataFrame({"0": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [3, 2, None]}), {}, {}),
(pd.DataFrame({"-": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({".": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({" ": [3.0, 2.0, None]}), {}, {}),
],
)
def test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):
if "x" in df and df.x.dtype == "M8[ns]" and "arrow" in engine:
pytest.xfail(reason="Parquet pyarrow v1 doesn't support nanosecond precision")
if (
"x" in df
and df.x.dtype == "M8[ns]"
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail(reason="fastparquet doesn't support nanosecond precision yet")
if (
PANDAS_GT_130
and read_kwargs.get("categories", None)
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail("https://github.com/dask/fastparquet/issues/577")
tmp = str(tmpdir)
if df.index.name is None:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
oe = write_kwargs.pop("object_encoding", None)
if oe and engine == "fastparquet":
dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)
else:
dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)
ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)
if str(ddf2.dtypes.get("x")) == "UInt16" and engine == "fastparquet":
        # fastparquet chooses to use masked type to be able to get true repr of
# 16-bit int
assert_eq(ddf.astype("UInt16"), ddf2)
else:
assert_eq(ddf, ddf2)
def test_categories(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": list("caaab")})
ddf = dd.from_pandas(df, npartitions=2)
ddf["y"] = ddf.y.astype("category")
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, categories=["y"], engine=engine)
# Shouldn't need to specify categories explicitly
ddf3 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf3, ddf2)
with pytest.raises(NotImplementedError):
ddf2.y.cat.categories
assert set(ddf2.y.compute().cat.categories) == {"a", "b", "c"}
cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()
assert cats_set.tolist() == ["a", "c", "a", "b"]
if engine == "fastparquet":
assert_eq(ddf.y, ddf2.y, check_names=False)
with pytest.raises(TypeError):
# attempt to load as category that which is not so encoded
ddf2 = dd.read_parquet(fn, categories=["x"], engine=engine).compute()
with pytest.raises((ValueError, FutureWarning)):
# attempt to load as category unknown column
ddf2 = dd.read_parquet(fn, categories=["foo"], engine=engine)
def test_categories_unnamed_index(tmpdir, engine):
# Check that we can handle an unnamed categorical index
# https://github.com/dask/dask/issues/6885
tmpdir = str(tmpdir)
df = pd.DataFrame(
data={"A": [1, 2, 3], "B": ["a", "a", "b"]}, index=["x", "y", "y"]
)
ddf = dd.from_pandas(df, npartitions=1)
ddf = ddf.categorize(columns=["B"])
ddf.to_parquet(tmpdir, engine=engine)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf.index, ddf2.index, check_divisions=False)
def test_empty_partition(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": range(10), "b": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf[ddf.a <= 5]
ddf2.to_parquet(fn, engine=engine)
ddf3 = dd.read_parquet(fn, engine=engine)
assert ddf3.npartitions < 5
sol = ddf2.compute()
assert_eq(sol, ddf3, check_names=False, check_index=False)
def test_timestamp_index(tmpdir, engine):
fn = str(tmpdir)
df = dd._compat.makeTimeDataFrame()
df.index.name = "foo"
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
@PYARROW_MARK
def test_to_parquet_default_writes_nulls(tmpdir):
fn = str(tmpdir.join("test.parquet"))
df = pd.DataFrame({"c1": [1.0, np.nan, 2, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet(fn)
table = pq.read_table(fn)
assert table[1].null_count == 2
@PYARROW_LE_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):
df = pd.DataFrame(
{"partition_column": [0, 0, 1, 1], "strings": ["a", "b", None, None]}
)
ddf = dd.from_pandas(df, npartitions=2)
# In order to allow pyarrow to write an inconsistent schema,
# we need to avoid writing the _metadata file (will fail >0.17.1)
# and need to avoid schema inference (i.e. use `schema=None`)
ddf.to_parquet(
str(tmpdir),
engine="pyarrow",
partition_on=["partition_column"],
write_metadata_file=False,
schema=None,
)
# Test that schema is not validated by default
# (shouldn't raise error with legacy dataset)
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
).compute()
# Test that read fails when validate_schema=True
# Note: This fails differently for pyarrow.dataset api
with pytest.raises(ValueError) as e_info:
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
dataset={"validate_schema": True},
).compute()
    assert "Schema in partition" in str(e_info.value)
    assert "was different" in str(e_info.value)
@PYARROW_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(
tmpdir,
):
# Data types to test: strings, arrays, ints, timezone aware timestamps
in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]
out_arrays = [[0, 1, 2], [3, 4], None, None]
in_strings = ["a", "b", np.nan, np.nan]
out_strings = ["a", "b", None, None]
tstamp = pd.Timestamp(1513393355, unit="s")
in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]
out_tstamps = [
# Timestamps come out in numpy.datetime64 format
tstamp.to_datetime64(),
tstamp.to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
timezone = "US/Eastern"
tz_tstamp = pd.Timestamp(1513393355, unit="s", tz=timezone)
in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]
out_tz_tstamps = [
# Timezones do not make it through a write-read cycle.
tz_tstamp.tz_convert(None).to_datetime64(),
tz_tstamp.tz_convert(None).to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
df = pd.DataFrame(
{
"partition_column": [0, 0, 1, 1],
"arrays": in_arrays,
"strings": in_strings,
"tstamps": in_tstamps,
"tz_tstamps": in_tz_tstamps,
}
)
ddf = dd.from_pandas(df, npartitions=2)
schema = pa.schema(
[
("arrays", pa.list_(pa.int64())),
("strings", pa.string()),
("tstamps", pa.timestamp("ns")),
("tz_tstamps", pa.timestamp("ns", timezone)),
("partition_column", pa.int64()),
]
)
ddf.to_parquet(
str(tmpdir), engine="pyarrow", partition_on="partition_column", schema=schema
)
ddf_after_write = (
dd.read_parquet(str(tmpdir), engine="pyarrow", gather_statistics=False)
.compute()
.reset_index(drop=True)
)
# Check array support
arrays_after_write = ddf_after_write.arrays.values
for i in range(len(df)):
assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])
# Check datetime support
tstamps_after_write = ddf_after_write.tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tstamps_after_write[i]):
assert np.isnat(out_tstamps[i])
else:
assert tstamps_after_write[i] == out_tstamps[i]
# Check timezone aware datetime support
tz_tstamps_after_write = ddf_after_write.tz_tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tz_tstamps_after_write[i]):
assert np.isnat(out_tz_tstamps[i])
else:
assert tz_tstamps_after_write[i] == out_tz_tstamps[i]
# Check string support
assert np.array_equal(ddf_after_write.strings.values, out_strings)
# Check partition column
assert np.array_equal(ddf_after_write.partition_column, df.partition_column)
@PYARROW_MARK
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("schema", ["infer", "complex"])
def test_pyarrow_schema_inference(tmpdir, index, engine, schema):
if schema == "complex":
schema = {"index": pa.string(), "amount": pa.int64()}
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"index": ["1", "2", "3", "2", "3", "1", "4"],
"date": pd.to_datetime(
[
"2017-01-01",
"2017-01-01",
"2017-01-01",
"2017-01-02",
"2017-01-02",
"2017-01-06",
"2017-01-09",
]
),
"amount": [100, 200, 300, 400, 500, 600, 700],
},
index=range(7, 14),
)
if index:
df = dd.from_pandas(df, npartitions=2).set_index("index")
else:
df = dd.from_pandas(df, npartitions=2)
df.to_parquet(tmpdir, engine="pyarrow", schema=schema)
df_out = dd.read_parquet(tmpdir, engine=engine)
df_out.compute()
if index and engine == "fastparquet":
# Fastparquet fails to detect int64 from _metadata
df_out["amount"] = df_out["amount"].astype("int64")
# Fastparquet not handling divisions for
# pyarrow-written dataset with string index
assert_eq(df, df_out, check_divisions=False)
else:
assert_eq(df, df_out)
def test_partition_on(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
"d": np.arange(0, 100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
# Note #1: Cross-engine functionality is missing
# Note #2: The index is not preserved in pyarrow when partition_on is used
out = dd.read_parquet(
tmpdir, engine=engine, index=False, gather_statistics=False
).compute()
for val in df.a1.unique():
assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])
# Now specify the columns and allow auto-index detection
out = dd.read_parquet(tmpdir, engine=engine, columns=["d", "a2"]).compute()
for val in df.a2.unique():
assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])
def test_partition_on_duplicates(tmpdir, engine):
# https://github.com/dask/dask/issues/6445
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"data": np.random.random(size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
for _ in range(2):
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
out = dd.read_parquet(tmpdir, engine=engine).compute()
assert len(df) == len(out)
for root, dirs, files in os.walk(tmpdir):
for file in files:
assert file in (
"part.0.parquet",
"part.1.parquet",
"_common_metadata",
"_metadata",
)
@PYARROW_MARK
@pytest.mark.parametrize("partition_on", ["aa", ["aa"]])
def test_partition_on_string(tmpdir, partition_on):
tmpdir = str(tmpdir)
with dask.config.set(scheduler="single-threaded"):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"aa": np.random.choice(["A", "B", "C"], size=100),
"bb": np.random.random(size=100),
"cc": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(
tmpdir, partition_on=partition_on, write_index=False, engine="pyarrow"
)
out = dd.read_parquet(
tmpdir, index=False, gather_statistics=False, engine="pyarrow"
)
out = out.compute()
for val in df.aa.unique():
assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])
@write_read_engines()
def test_filters_categorical(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
cats = ["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]
dftest = pd.DataFrame(
{
"dummy": [1, 1, 1, 1],
"DatePart": pd.Categorical(cats, categories=cats, ordered=True),
}
)
ddftest = dd.from_pandas(dftest, npartitions=4).set_index("dummy")
ddftest.to_parquet(tmpdir, partition_on="DatePart", engine=write_engine)
ddftest_read = dd.read_parquet(
tmpdir,
index="dummy",
engine=read_engine,
filters=[(("DatePart", "<=", "2018-01-02"))],
)
assert len(ddftest_read) == 2
@write_read_engines()
def test_filters(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine=write_engine)
a = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "==", "c")])
assert b.npartitions == 1
assert (b.y == "c").all().compute()
c = dd.read_parquet(
tmp_path, engine=read_engine, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
assert_eq(c, c)
d = dd.read_parquet(
tmp_path,
engine=read_engine,
filters=[
# Select two overlapping ranges
[("x", ">", 1), ("x", "<", 6)],
[("x", ">", 3), ("x", "<", 8)],
],
)
assert d.npartitions == 3
assert ((d.x > 1) & (d.x < 8)).all().compute()
e = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", "in", (0, 9))])
assert e.npartitions == 2
assert ((e.x < 2) | (e.x > 7)).all().compute()
f = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "=", "c")])
assert f.npartitions == 1
assert len(f)
assert (f.y == "c").all().compute()
@write_read_engines()
def test_filters_v0(tmpdir, write_engine, read_engine):
if write_engine == "fastparquet" or read_engine == "fastparquet":
pytest.importorskip("fastparquet", minversion="0.3.1")
# Recent versions of pyarrow support full row-wise filtering
# (fastparquet and older pyarrow versions do not)
pyarrow_row_filtering = read_engine == "pyarrow-dataset"
fn = str(tmpdir)
df = pd.DataFrame({"at": ["ab", "aa", "ba", "da", "bb"]})
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(
fn, write_index=False, engine=write_engine
)
ddf2 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
if pyarrow_row_filtering:
assert_eq(ddf2, ddf[ddf["at"] == "aa"], check_index=False)
assert_eq(ddf3, ddf[ddf["at"] == "aa"], check_index=False)
else:
assert_eq(ddf2, ddf)
assert_eq(ddf3, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
if read_engine == "fastparquet":
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "==", "aa")])
df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "=", "aa")])
assert len(df2) > 0
assert len(df3) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
assert len(ddf2) > 0
assert len(ddf3) > 0
assert_eq(ddf2, ddf3)
def test_filtering_pyarrow_dataset(tmpdir, engine):
pytest.importorskip("pyarrow", minversion="1.0.0")
fn = str(tmpdir)
df = pd.DataFrame({"aa": range(100), "bb": ["cat", "dog"] * 50})
ddf = dd.from_pandas(df, npartitions=10)
ddf.to_parquet(fn, write_index=False, engine=engine)
# Filtered read
aa_lim = 40
bb_val = "dog"
filters = [[("aa", "<", aa_lim), ("bb", "==", bb_val)]]
ddf2 = dd.read_parquet(fn, index=False, engine="pyarrow-dataset", filters=filters)
    # Check that partitions are filtered for "aa" filter
nonempty = 0
for part in ddf[ddf["aa"] < aa_lim].partitions:
nonempty += int(len(part.compute()) > 0)
assert ddf2.npartitions == nonempty
# Check that rows are filtered for "aa" and "bb" filters
df = df[df["aa"] < aa_lim]
df = df[df["bb"] == bb_val]
assert_eq(df, ddf2.compute(), check_index=False)
def test_fiters_file_list(tmpdir, engine):
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(str(tmpdir), engine=engine)
fils = str(tmpdir.join("*.parquet"))
ddf_out = dd.read_parquet(
fils, gather_statistics=True, engine=engine, filters=[("x", ">", 3)]
)
assert ddf_out.npartitions == 3
assert_eq(df[df["x"] > 3], ddf_out.compute(), check_index=False)
    # Check that first partition gets filtered for single-path input
ddf2 = dd.read_parquet(
str(tmpdir.join("part.0.parquet")),
gather_statistics=True,
engine=engine,
filters=[("x", ">", 3)],
)
assert len(ddf2) == 0
def test_pyarrow_filter_divisions(tmpdir):
pytest.importorskip("pyarrow")
# Write simple dataset with an index that will only
# have a sorted index if certain row-groups are filtered out.
# In this case, we filter "a" <= 3 to get a sorted
# index. Otherwise, "a" is NOT monotonically increasing.
df = pd.DataFrame({"a": [0, 1, 10, 12, 2, 3, 8, 9], "b": range(8)}).set_index("a")
df.iloc[:4].to_parquet(
str(tmpdir.join("file.0.parquet")), engine="pyarrow", row_group_size=2
)
df.iloc[4:].to_parquet(
str(tmpdir.join("file.1.parquet")), engine="pyarrow", row_group_size=2
)
# Only works for ArrowDatasetEngine.
# Legacy code will not apply filters on individual row-groups
# when `split_row_groups=False`.
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=False,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=True,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
def test_divisions_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
size = 100
categoricals = []
for value in ["a", "b", "c", "d"]:
categoricals += [value] * int(size / 4)
df = pd.DataFrame(
{
"a": categoricals,
"b": np.random.random(size=size),
"c": np.random.randint(1, 5, size=size),
}
)
d = dd.from_pandas(df, npartitions=4)
# save it
d.to_parquet(tmpdir, write_index=True, partition_on=["a"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("a", "==", "b")])
# test it
expected_divisions = (25, 49)
assert out.divisions == expected_divisions
def test_divisions_are_known_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
df = pd.DataFrame(
{
"unique": [0, 0, 1, 1, 2, 2, 3, 3],
"id": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
},
index=[0, 0, 1, 1, 2, 2, 3, 3],
)
d = dd.from_pandas(df, npartitions=2)
# save it
d.to_parquet(tmpdir, partition_on=["id"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("id", "==", "id1")])
# test it
assert out.known_divisions
expected_divisions = (0, 2, 3)
assert out.divisions == expected_divisions
@FASTPARQUET_MARK
@pytest.mark.xfail(reason="No longer accept ParquetFile objects")
def test_read_from_fastparquet_parquetfile(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=["a"], engine="fastparquet")
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[("a", "==", "B")]).compute()
assert set(df.b[df.a == "B"]) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine="pyarrow")
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.0, 2.0, 3.0, 4.0]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, "dask")
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_timestamp96(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame({"a": [pd.to_datetime("now", utc=True)]})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times="int96")
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn, index=False).compute()
assert_eq(out, df)
@FASTPARQUET_MARK
def test_drill_scheme(tmpdir):
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
files = []
for d in ["test_data1", "test_data2"]:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, "data1.parq"))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert "dir0" in df.columns
out = df.compute()
assert "dir0" in out
assert (np.unique(out.dir0) == ["test_data1", "test_data2"]).all()
def test_parquet_select_cats(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame(
{
"categories": pd.Series(
np.random.choice(["a", "b", "c", "d", "e", "f"], size=100),
dtype="category",
),
"ints": pd.Series(list(range(0, 100)), dtype="int"),
"floats": pd.Series(list(range(0, 100)), dtype="float"),
}
)
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, engine=engine)
rddf = dd.read_parquet(fn, columns=["ints"], engine=engine)
assert list(rddf.columns) == ["ints"]
rddf = dd.read_parquet(fn, engine=engine)
assert list(rddf.columns) == list(df)
def test_columns_name(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version <= parse_version("0.3.1"):
pytest.skip("Fastparquet does not write column_indexes up to 0.3.1")
tmp_path = str(tmpdir)
df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
df.columns.name = "cols"
ddf = dd.from_pandas(df, 2)
ddf.to_parquet(tmp_path, engine=engine)
result = dd.read_parquet(tmp_path, engine=engine, index=["idx"])
assert_eq(result, df)
def check_compression(engine, filename, compression):
if engine == "fastparquet":
pf = fastparquet.ParquetFile(filename)
md = pf.fmd.row_groups[0].columns[0].meta_data
if compression is None:
assert md.total_compressed_size == md.total_uncompressed_size
else:
assert md.total_compressed_size != md.total_uncompressed_size
else:
metadata = pa.parquet.ParquetDataset(filename).metadata
names = metadata.schema.names
for i in range(metadata.num_row_groups):
row_group = metadata.row_group(i)
for j in range(len(names)):
column = row_group.column(j)
if compression is None:
assert (
column.total_compressed_size == column.total_uncompressed_size
)
else:
compress_expect = compression
if compression == "default":
compress_expect = "snappy"
assert compress_expect.lower() == column.compression.lower()
assert (
column.total_compressed_size != column.total_uncompressed_size
)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
    df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
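    # NOTE: the lines below are a sketch of how this truncated test presumably
    # finishes (write with the requested compression, then verify it with the
    # check_compression helper above); they are not taken verbatim from the source.
    ddf = dd.from_pandas(df, npartitions=3)
    ddf.to_parquet(fn, compression=compression, engine=engine)
    check_compression(engine, fn, compression)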
import honeycomb_io.core
import honeycomb_io.utils
import honeycomb_io.environments
import minimal_honeycomb
import pandas as pd
import numpy as np
import datetime
import logging
logger = logging.getLogger(__name__)
DEFAULT_CAMERA_DEVICE_TYPES = [
'PI3WITHCAMERA',
'PI4WITHCAMERA',
'PIZEROWITHCAMERA'
]
# Used by:
# camera_calibration.colmap (wf-camera-calibration)
def write_intrinsic_calibration_data(
data,
start_datetime,
client=None,
uri=None,
token_uri=None,
audience=None,
client_id=None,
client_secret=None
):
intrinsic_calibration_data_columns = [
'device_id',
'image_width',
'image_height',
'camera_matrix',
'distortion_coefficients'
]
if not set(intrinsic_calibration_data_columns).issubset(set(data.columns)):
raise ValueError('Data must contain the following columns: {}'.format(
intrinsic_calibration_data_columns
))
intrinsic_calibration_data_df = data.reset_index().reindex(columns=intrinsic_calibration_data_columns)
intrinsic_calibration_data_df.rename(columns={'device_id': 'device'}, inplace=True)
intrinsic_calibration_data_df['start'] = honeycomb_io.utils.to_honeycomb_datetime(start_datetime)
intrinsic_calibration_data_df['camera_matrix'] = intrinsic_calibration_data_df['camera_matrix'].apply(lambda x: x.tolist())
intrinsic_calibration_data_df['distortion_coefficients'] = intrinsic_calibration_data_df['distortion_coefficients'].apply(lambda x: x.tolist())
records = intrinsic_calibration_data_df.to_dict(orient='records')
if client is None:
client = minimal_honeycomb.MinimalHoneycombClient(
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
result=client.bulk_mutation(
request_name='createIntrinsicCalibration',
arguments={
'intrinsicCalibration': {
'type': 'IntrinsicCalibrationInput',
'value': records
}
},
return_object=[
'intrinsic_calibration_id'
]
)
ids = None
if len(result) > 0:
ids = [datum.get('intrinsic_calibration_id') for datum in result]
return ids
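# Example (not part of the original module): a minimal sketch of how the writer above
# might be called. The device ID and calibration values are hypothetical, and a
# reachable Honeycomb deployment with credentials is assumed.
def _example_write_intrinsic_calibration():
    calib = pd.DataFrame([{
        'device_id': 'hypothetical-camera-device-id',
        'image_width': 1296,
        'image_height': 972,
        'camera_matrix': np.eye(3),
        'distortion_coefficients': np.zeros(5),
    }])
    return write_intrinsic_calibration_data(
        data=calib,
        start_datetime=datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc),
    )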
# Used by:
# camera_calibration.colmap (wf-camera-calibration)
def write_extrinsic_calibration_data(
data,
start_datetime,
coordinate_space_id,
client=None,
uri=None,
token_uri=None,
audience=None,
client_id=None,
client_secret=None
):
extrinsic_calibration_data_columns = [
'device_id',
'rotation_vector',
'translation_vector'
]
if not set(extrinsic_calibration_data_columns).issubset(set(data.columns)):
raise ValueError('Data must contain the following columns: {}'.format(
extrinsic_calibration_data_columns
))
extrinsic_calibration_data_df = data.reset_index().reindex(columns=extrinsic_calibration_data_columns)
extrinsic_calibration_data_df.rename(columns={'device_id': 'device'}, inplace=True)
extrinsic_calibration_data_df['start'] = honeycomb_io.utils.to_honeycomb_datetime(start_datetime)
extrinsic_calibration_data_df['coordinate_space'] = coordinate_space_id
extrinsic_calibration_data_df['rotation_vector'] = extrinsic_calibration_data_df['rotation_vector'].apply(lambda x: x.tolist())
extrinsic_calibration_data_df['translation_vector'] = extrinsic_calibration_data_df['translation_vector'].apply(lambda x: x.tolist())
records = extrinsic_calibration_data_df.to_dict(orient='records')
if client is None:
client = minimal_honeycomb.MinimalHoneycombClient(
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
result=client.bulk_mutation(
request_name='createExtrinsicCalibration',
arguments={
'extrinsicCalibration': {
'type': 'ExtrinsicCalibrationInput',
'value': records
}
},
return_object=[
'extrinsic_calibration_id'
]
)
ids = None
if len(result) > 0:
ids = [datum.get('extrinsic_calibration_id') for datum in result]
return ids
def fetch_camera_status(
environment_id=None,
environment_name=None,
chunk_size=100,
client=None,
uri=None,
token_uri=None,
audience=None,
client_id=None,
client_secret=None
):
now = datetime.datetime.now(tz=datetime.timezone.utc)
camera_info_df = fetch_camera_info(
environment_id=environment_id,
environment_name=environment_name,
start=now,
end=now,
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
assignment_ids = list(camera_info_df['assignment_id'])
video_latest_df = fetch_latest_video_datapoints(
assignment_ids=assignment_ids,
environment_id=None,
environment_name=None,
device_types=DEFAULT_CAMERA_DEVICE_TYPES,
output_format='dataframe',
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
camera_status_df = (
camera_info_df
.join(
video_latest_df
.set_index('device_id')
.loc[:, ['timestamp']]
.rename(columns={'timestamp': 'latest_timestamp'})
)
)
camera_status_df['minutes_ago'] = camera_status_df['latest_timestamp'].apply(
lambda timestamp: honeycomb_io.utils.minutes_elapsed(timestamp, now)
)
return camera_status_df
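# Example (not part of the original module): a quick status report for a hypothetical
# environment name; assumes Honeycomb credentials are configured in the environment.
def _example_stale_cameras():
    camera_status_df = fetch_camera_status(environment_name='example-classroom')
    # Cameras whose most recent video datapoint is more than ten minutes old.
    return camera_status_df[camera_status_df['minutes_ago'] > 10]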
def fetch_camera_info(
environment_id=None,
environment_name=None,
start=None,
end=None,
chunk_size=100,
client=None,
uri=None,
token_uri=None,
audience=None,
client_id=None,
client_secret=None
):
devices_df = honeycomb_io.devices.fetch_devices(
device_types=DEFAULT_CAMERA_DEVICE_TYPES,
environment_id=environment_id,
environment_name=environment_name,
start=start,
end=end,
output_format='dataframe',
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
device_ids = list(devices_df.index.unique().dropna())
device_assignments_df = honeycomb_io.devices.fetch_device_assignments_by_device_id(
device_ids=device_ids,
start=start,
end=end,
require_unique_assignment=True,
require_all_devices=False,
output_format='dataframe',
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
camera_info_df = (
devices_df
.join(device_assignments_df.reset_index().set_index('device_id'))
)
return camera_info_df
def fetch_latest_video_datapoints(
assignment_ids=None,
environment_id=None,
environment_name=None,
device_types=DEFAULT_CAMERA_DEVICE_TYPES,
output_format='list',
chunk_size=100,
client=None,
uri=None,
token_uri=None,
audience=None,
client_id=None,
client_secret=None
):
now = datetime.datetime.now(tz=datetime.timezone.utc)
if assignment_ids is None:
camera_info_df = fetch_camera_info(
environment_id=environment_id,
environment_name=environment_name,
start=now,
end=now,
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
assignment_ids = list(camera_info_df['assignment_id'].dropna())
logger.info('Fetching latest video datapoints for assignment IDs {}'.format(
assignment_ids
))
return_data = [
'data_id',
'timestamp',
{'source': [
{'... on Assignment': [
'assignment_id',
'start',
'end',
{'environment': [
'environment_id',
'name'
]},
{'assigned': [
{'... on Device': [
'device_id',
'serial_number',
'part_number',
'tag_id',
'name',
'mac_address'
]}
]}
]}
]},
{'file': [
'bucketName',
'key'
]}
]
data = list()
for assignment_id in assignment_ids:
query_list=[
{'field': 'source', 'operator': 'EQ', 'value': assignment_id}
]
datum=honeycomb_io.core.fetch_latest_object(
object_name='Datapoint',
query_list=query_list,
return_data=return_data,
request_name=None,
id_field_name=None,
timestamp_field='timestamp',
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
if datum is not None:
data.append(datum)
if output_format=='list':
return data
elif output_format == 'dataframe':
return generate_video_datapoint_dataframe(data)
else:
raise ValueError('Output format {} not recognized'.format(output_format))
def generate_video_datapoint_dataframe(
data
):
flat_list = list()
for datum in data:
flat_list.append({
'data_id': datum.get('data_id'),
'timestamp': pd.to_datetime(datum.get('timestamp'), utc=True),
'device_id': datum.get('source', {}).get('assigned', {}).get('device_id'),
'device_part_number': datum.get('source', {}).get('assigned', {}).get('part_number'),
'device_serial_number': datum.get('source', {}).get('assigned', {}).get('serial_number'),
'device_tag_id': datum.get('source', {}).get('assigned', {}).get('tag_id'),
'device_name': datum.get('source', {}).get('assigned', {}).get('name'),
'device_mac_address': datum.get('source', {}).get('assigned', {}).get('mac_address'),
'assignment_id': datum.get('source', {}).get('assignment_id'),
'assignment_start': datum.get('source', {}).get('start'),
'assignment_end': datum.get('source', {}).get('end'),
'bucket_name': datum.get('file', {}).get('bucketName'),
'key': datum.get('file', {}).get('key')
})
df = pd.DataFrame(flat_list, dtype='object')
df['timestamp'] = pd.to_datetime(df['timestamp'])
    df['assignment_start'] = pd.to_datetime(df['assignment_start'])
import warnings
from pathlib import Path
import pandas as pd
from scipy.signal import correlate
from scipy.optimize import curve_fit
from laspec.mrs import MrsSpec
from laspec.wavelength import vac2air
from astropy import constants
from astropy.io import fits
from .util import dgauss, gauss, sersic
from .values import *
warnings.simplefilter(action="ignore", category=RuntimeWarning)
def cal_rv(ms, wavelength=6562.80277):
try:
popt, pcov = curve_fit(
sersic, ms.wave, ms.flux_norm, p0=(1, 1, 1, wavelength, 1)
)
except:
return np.nan, np.nan
perr = np.sqrt(np.diag(pcov))
rv = (popt[3] / wavelength - 1) * constants.c.cgs.value / 1e5
rv_err = (perr[3] / wavelength) * constants.c.cgs.value / 1e5
return rv, rv_err
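# Worked example (illustrative, not from the original module): cal_rv() turns the
# fitted line centre into a radial velocity via the classical Doppler relation
# rv = (lambda_obs / lambda_rest - 1) * c. For a hypothetical Sersic-fit centre of
# 6563.90 AA against the H-alpha rest wavelength used above, this gives ~50.1 km/s.
def _example_doppler_rv(fitted_center=6563.90, rest=6562.80277):
    return (fitted_center / rest - 1) * constants.c.cgs.value / 1e5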
def combine_band(ms_b, ms_r):
ms_b.wave = np.append(ms_b.wave, ms_r.wave)
ms_b.flux = np.append(ms_b.flux, ms_r.flux)
ms_b.flux_cont = np.append(ms_b.flux_cont, ms_r.flux_cont)
ms_b.flux_err = np.append(ms_b.flux_err, ms_r.flux_err)
ms_b.flux_norm = np.append(ms_b.flux_norm, ms_r.flux_norm)
ms_b.flux_norm_err = np.append(ms_b.flux_norm_err, ms_r.flux_norm_err)
ms_b.ivar = np.append(ms_b.ivar, ms_r.ivar)
ms_b.ivar_norm = np.append(ms_b.ivar_norm, ms_r.ivar_norm)
ms_b.mask = np.append(ms_b.mask, ms_r.mask)
return ms_b
def read_mrs_spectrum(file_name, rvb, rvr, **kargs):
ms_r = read_mrs_band_rv_spectrum(file_name, "COADD_R", **kargs)
ms_b = read_mrs_band_rv_spectrum(file_name, "COADD_B", **kargs)
ms_r.wave = ms_r.wave_rv(rvr)
ms_b.wave = ms_b.wave_rv(rvb)
return combine_band(ms_b, ms_r)
def read_mrs_band_rv_spectrum(file_name, ext, **kargs):
ms = MrsSpec.from_mrs(file_name, ext, **kargs)
ms.wave = vac2air(ms.wave)
return ms
def read_mrs_single_spectrum(file_name, rv, ext_r, ext_b, **kargs):
ms_r = read_mrs_band_rv_spectrum(file_name, ext_r, **kargs)
ms_b = read_mrs_band_rv_spectrum(file_name, ext_b, **kargs)
ms_r.wave = ms_r.wave_rv(rv)
ms_b.wave = ms_b.wave_rv(rv)
return combine_band(ms_b, ms_r)
def prepare_mrs_data(ms_list, wave):
flux_norm = np.array(
[
np.interp(wave, ms.wave[ms.mask == 0], ms.flux_norm[ms.mask == 0])
for ms in ms_list
]
)
ivar_norm = np.array(
[
np.interp(wave, ms.wave[ms.mask == 0], ms.flux_norm_err[ms.mask == 0] ** -2)
for ms in ms_list
]
)
return flux_norm, ivar_norm
def prepare_mrs_ori_data(ms_list, wave):
flux = np.array(
[
np.interp(wave, ms.wave[ms.mask == 0], ms.flux[ms.mask == 0])
for ms in ms_list
]
)
ivar = np.array(
[
np.interp(wave, ms.wave[ms.mask == 0], ms.flux_err[ms.mask == 0] ** -2)
for ms in ms_list
]
)
return flux, ivar
def extract_mrs_info(file_name, rvb, rvr, **kargs):
ms = read_mrs_spectrum(file_name, rvb, rvr, **kargs)
flux_norm, ivar_norm = prepare_mrs_data([ms], wave_mrs_array)
    ms_n = pd.DataFrame({"wave": wave_mrs_array, "flux": flux_norm[0]})
# Copyright (c) 2020, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core import DataFrame, Series
from cudf.tests.utils import (
INTEGER_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
def test_series_replace():
a1 = np.array([0, 1, 2, 3, 4])
# Numerical
a2 = np.array([5, 1, 2, 3, 4])
sr1 = Series(a1)
sr2 = sr1.replace(0, 5)
assert_eq(a2, sr2.to_array())
# Categorical
psr3 = pd.Series(["one", "two", "three"], dtype="category")
psr4 = psr3.replace("one", "two")
sr3 = Series.from_pandas(psr3)
sr4 = sr3.replace("one", "two")
assert_eq(psr4, sr4)
psr5 = psr3.replace("one", "five")
sr5 = sr3.replace("one", "five")
assert_eq(psr5, sr5)
# List input
a6 = np.array([5, 6, 2, 3, 4])
sr6 = sr1.replace([0, 1], [5, 6])
assert_eq(a6, sr6.to_array())
with pytest.raises(TypeError):
sr1.replace([0, 1], [5.5, 6.5])
# Series input
a8 = np.array([5, 5, 5, 3, 4])
sr8 = sr1.replace(sr1[:3], 5)
assert_eq(a8, sr8.to_array())
# large input containing null
sr9 = Series(list(range(400)) + [None])
sr10 = sr9.replace([22, 323, 27, 0], None)
assert sr10.null_count == 5
assert len(sr10.to_array()) == (401 - 5)
sr11 = sr9.replace([22, 323, 27, 0], -1)
assert sr11.null_count == 1
assert len(sr11.to_array()) == (401 - 1)
# large input not containing nulls
sr9 = sr9.fillna(-11)
sr12 = sr9.replace([22, 323, 27, 0], None)
assert sr12.null_count == 4
assert len(sr12.to_array()) == (401 - 4)
sr13 = sr9.replace([22, 323, 27, 0], -1)
assert sr13.null_count == 0
assert len(sr13.to_array()) == 401
def test_series_replace_with_nulls():
a1 = np.array([0, 1, 2, 3, 4])
# Numerical
a2 = np.array([-10, 1, 2, 3, 4])
sr1 = Series(a1)
sr2 = sr1.replace(0, None).fillna(-10)
assert_eq(a2, sr2.to_array())
# List input
a6 = np.array([-10, 6, 2, 3, 4])
sr6 = sr1.replace([0, 1], [None, 6]).fillna(-10)
assert_eq(a6, sr6.to_array())
sr1 = Series([0, 1, 2, 3, 4, None])
with pytest.raises(TypeError):
sr1.replace([0, 1], [5.5, 6.5]).fillna(-10)
# Series input
a8 = np.array([-10, -10, -10, 3, 4, -10])
sr8 = sr1.replace(sr1[:3], None).fillna(-10)
assert_eq(a8, sr8.to_array())
a9 = np.array([-10, 6, 2, 3, 4, -10])
sr9 = sr1.replace([0, 1], [None, 6]).fillna(-10)
assert_eq(a9, sr9.to_array())
def test_dataframe_replace():
# numerical
pdf1 = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, 3]})
gdf1 = DataFrame.from_pandas(pdf1)
pdf2 = pdf1.replace(0, 4)
gdf2 = gdf1.replace(0, 4)
assert_eq(gdf2, pdf2)
# categorical
pdf4 = pd.DataFrame(
{"a": ["one", "two", "three"], "b": ["one", "two", "three"]},
dtype="category",
)
gdf4 = DataFrame.from_pandas(pdf4)
pdf5 = pdf4.replace("two", "three")
gdf5 = gdf4.replace("two", "three")
assert_eq(gdf5, pdf5)
# list input
pdf6 = pdf1.replace([0, 1], [4, 5])
gdf6 = gdf1.replace([0, 1], [4, 5])
assert_eq(gdf6, pdf6)
pdf7 = pdf1.replace([0, 1], 4)
gdf7 = gdf1.replace([0, 1], 4)
assert_eq(gdf7, pdf7)
# dict input:
pdf8 = pdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
gdf8 = gdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
assert_eq(gdf8, pdf8)
pdf9 = pdf1.replace({"a": 0}, {"a": 4})
gdf9 = gdf1.replace({"a": 0}, {"a": 4})
assert_eq(gdf9, pdf9)
def test_dataframe_replace_with_nulls():
# numerical
pdf1 = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, 3]})
gdf1 = DataFrame.from_pandas(pdf1)
pdf2 = pdf1.replace(0, 4)
gdf2 = gdf1.replace(0, None).fillna(4)
assert_eq(gdf2, pdf2)
# list input
pdf6 = pdf1.replace([0, 1], [4, 5])
gdf6 = gdf1.replace([0, 1], [4, None]).fillna(5)
assert_eq(gdf6, pdf6)
pdf7 = pdf1.replace([0, 1], 4)
gdf7 = gdf1.replace([0, 1], None).fillna(4)
assert_eq(gdf7, pdf7)
# dict input:
pdf8 = pdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
gdf8 = gdf1.replace({"a": 0, "b": 0}, {"a": None, "b": 5}).fillna(4)
assert_eq(gdf8, pdf8)
gdf1 = DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, None]})
gdf9 = gdf1.replace([0, 1], [4, 5]).fillna(3)
assert_eq(gdf9, pdf6)
def test_replace_strings():
pdf = pd.Series(["a", "b", "c", "d"])
gdf = Series(["a", "b", "c", "d"])
assert_eq(pdf.replace("a", "e"), gdf.replace("a", "e"))
@pytest.mark.parametrize(
"psr",
[
pd.Series([0, 1, None, 2, None], dtype=pd.Int8Dtype()),
pd.Series([0, 1, np.nan, 2, np.nan]),
],
)
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("fill_value", [10, pd.Series([10, 20, 30, 40, 50])])
@pytest.mark.parametrize("inplace", [True, False])
def test_series_fillna_numerical(psr, data_dtype, fill_value, inplace):
test_psr = psr.copy(deep=True)
# TODO: These tests should use Pandas' nullable int type
# when we support a recent enough version of Pandas
# https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
if np.dtype(data_dtype).kind not in ("f") and test_psr.dtype.kind == "i":
test_psr = test_psr.astype(
cudf.utils.dtypes.cudf_dtypes_to_pandas_dtypes[
np.dtype(data_dtype)
]
)
gsr = cudf.from_pandas(test_psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = test_psr.fillna(fill_value, inplace=inplace)
actual = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = test_psr
actual = gsr
# TODO: Remove check_dtype when we have support
# to compare with pandas nullable dtypes
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize(
"data",
[
[1, None, None, 2, 3, 4],
[None, None, 1, 2, None, 3, 4],
[1, 2, None, 3, 4, None, None],
],
)
@pytest.mark.parametrize("container", [pd.Series, pd.DataFrame])
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("method", ["ffill", "bfill"])
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_method_numerical(data, container, data_dtype, method, inplace):
if container == pd.DataFrame:
data = {"a": data, "b": data, "c": data}
pdata = container(data)
if np.dtype(data_dtype).kind not in ("f"):
data_dtype = cudf.utils.dtypes.cudf_dtypes_to_pandas_dtypes[
np.dtype(data_dtype)
]
pdata = pdata.astype(data_dtype)
    # Explicitly using nan_as_null=True
gdata = cudf.from_pandas(pdata, nan_as_null=True)
expected = pdata.fillna(method=method, inplace=inplace)
actual = gdata.fillna(method=method, inplace=inplace)
if inplace:
expected = pdata
actual = gdata
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize(
"psr",
[
pd.Series(["a", "b", "a", None, "c", None], dtype="category"),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["q", "r", "z", "a", "b", "c"],
),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["x", "t", "p", "q", "r", "z"],
),
pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"),
pd.Series(
[None, None, None, None, None, None, "a", "b", "c"],
dtype="category",
),
],
)
@pytest.mark.parametrize(
"fill_value",
[
"c",
pd.Series(["c", "c", "c", "c", "c", "a"], dtype="category"),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["x", "t", "p", "q", "r", "z"],
),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["q", "r", "z", "a", "b", "c"],
),
pd.Series(["a", "b", "a", None, "c", None], dtype="category"),
pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_categorical(psr, fill_value, inplace):
gsr = Series.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = psr
got = gsr
assert_eq(expected, got)
@pytest.mark.parametrize(
"psr",
[
pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y")),
pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"],
),
],
)
@pytest.mark.parametrize(
"fill_value",
[
pd.Timestamp("2010-01-02"),
pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y"))
+ pd.Timedelta("1d"),
pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"],
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_datetime(psr, fill_value, inplace):
gsr = cudf.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
got = gsr
expected = psr
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
# Categorical
pd.Categorical([1, 2, None, None, 3, 4]),
pd.Categorical([None, None, 1, None, 3, 4]),
pd.Categorical([1, 2, None, 3, 4, None, None]),
pd.Categorical(["1", "20", None, None, "3", "40"]),
pd.Categorical([None, None, "10", None, "30", "4"]),
pd.Categorical(["1", "20", None, "30", "4", None, None]),
# Datetime
np.array(
[
"2020-01-01 08:00:00",
"2020-01-01 09:00:00",
None,
"2020-01-01 10:00:00",
None,
"2020-01-01 10:00:00",
],
dtype="datetime64[ns]",
),
np.array(
[
None,
None,
"2020-01-01 09:00:00",
"2020-01-01 10:00:00",
None,
"2020-01-01 10:00:00",
],
dtype="datetime64[ns]",
),
np.array(
[
"2020-01-01 09:00:00",
None,
None,
"2020-01-01 10:00:00",
None,
None,
],
dtype="datetime64[ns]",
),
        # Timedelta
        np.array(
            [10, 100, 1000, None, None, 10, 100, 1000], dtype="timedelta64[ns]"
        ),
        np.array(
            [None, None, 10, None, 1000, 100, 10], dtype="timedelta64[ns]"
        ),
        np.array(
            [10, 100, None, None, 1000, None, None], dtype="timedelta64[ns]"
        ),
# String
np.array(
["10", "100", "1000", None, None, "10", "100", "1000"],
dtype="object",
),
np.array(
[None, None, "1000", None, "10", "100", "10"], dtype="object"
),
np.array(
["10", "100", None, None, "1000", None, None], dtype="object"
),
],
)
@pytest.mark.parametrize("container", [pd.Series, pd.DataFrame])
@pytest.mark.parametrize("method", ["ffill", "bfill"])
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_method_fixed_width_non_num(data, container, method, inplace):
if container == pd.DataFrame:
data = {"a": data, "b": data, "c": data}
pdata = container(data)
    # Explicitly using nan_as_null=True
gdata = cudf.from_pandas(pdata, nan_as_null=True)
expected = pdata.fillna(method=method, inplace=inplace)
actual = gdata.fillna(method=method, inplace=inplace)
if inplace:
expected = pdata
actual = gdata
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, None], "b": [None, None, 5]}),
pd.DataFrame(
{"a": [1, 2, None], "b": [None, None, 5]}, index=["a", "p", "z"]
),
],
)
@pytest.mark.parametrize(
"value",
[
10,
pd.Series([10, 20, 30]),
pd.Series([3, 4, 5]),
pd.Series([10, 20, 30], index=["z", "a", "p"]),
{"a": 5, "b": pd.Series([3, 4, 5])},
{"a": 5001},
{"b": pd.Series([11, 22, 33], index=["a", "p", "z"])},
{"a": 5, "b": pd.Series([3, 4, 5], index=["a", "p", "z"])},
{"c": 100},
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_dataframe(df, value, inplace):
pdf = df.copy(deep=True)
gdf = DataFrame.from_pandas(pdf)
fill_value_pd = value
if isinstance(fill_value_pd, (pd.Series, pd.DataFrame)):
fill_value_cudf = cudf.from_pandas(fill_value_pd)
elif isinstance(fill_value_pd, dict):
fill_value_cudf = {}
for key in fill_value_pd:
temp_val = fill_value_pd[key]
if isinstance(temp_val, pd.Series):
temp_val = cudf.from_pandas(temp_val)
fill_value_cudf[key] = temp_val
else:
fill_value_cudf = value
expect = pdf.fillna(fill_value_pd, inplace=inplace)
got = gdf.fillna(fill_value_cudf, inplace=inplace)
if inplace:
got = gdf
expect = pdf
assert_eq(expect, got)
@pytest.mark.parametrize(
"psr",
[
pd.Series(["a", "b", "c", "d"]),
pd.Series([None] * 4, dtype="object"),
pd.Series(["z", None, "z", None]),
pd.Series(["x", "y", None, None, None]),
pd.Series([None, None, None, "i", "P"]),
],
)
@pytest.mark.parametrize(
"fill_value",
[
"a",
pd.Series(["a", "b", "c", "d"]),
pd.Series(["z", None, "z", None]),
pd.Series([None] * 4, dtype="object"),
pd.Series(["x", "y", None, None, None]),
pd.Series([None, None, None, "i", "P"]),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_string(psr, fill_value, inplace):
gsr = cudf.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = psr
got = gsr
assert_eq(expected, got)
@pytest.mark.parametrize("data_dtype", INTEGER_TYPES)
def test_series_fillna_invalid_dtype(data_dtype):
gdf = Series([1, 2, None, 3], dtype=data_dtype)
fill_value = 2.5
with pytest.raises(TypeError) as raises:
gdf.fillna(fill_value)
raises.match(
f"Cannot safely cast non-equivalent"
f" {type(fill_value).__name__} to {gdf.dtype.type.__name__}"
)
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("fill_value", [100, 100.0, 128.5])
def test_series_where(data_dtype, fill_value):
psr = pd.Series(list(range(10)), dtype=data_dtype)
sr = Series.from_pandas(psr)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr > 0, fill_value)
else:
# Cast back to original dtype as pandas automatically upcasts
expect = psr.where(psr > 0, fill_value).astype(psr.dtype)
got = sr.where(sr > 0, fill_value)
assert_eq(expect, got)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr < 0, fill_value)
else:
expect = psr.where(psr < 0, fill_value).astype(psr.dtype)
got = sr.where(sr < 0, fill_value)
assert_eq(expect, got)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr == 0, fill_value)
else:
expect = psr.where(psr == 0, fill_value).astype(psr.dtype)
got = sr.where(sr == 0, fill_value)
assert_eq(expect, got)
@pytest.mark.parametrize("fill_value", [100, 100.0, 100.5])
def test_series_with_nulls_where(fill_value):
psr = pd.Series([None] * 3 + list(range(5)))
sr = Series.from_pandas(psr)
expect = psr.where(psr > 0, fill_value)
got = sr.where(sr > 0, fill_value)
assert_eq(expect, got)
expect = psr.where(psr < 0, fill_value)
got = sr.where(sr < 0, fill_value)
assert_eq(expect, got)
expect = psr.where(psr == 0, fill_value)
got = sr.where(sr == 0, fill_value)
assert_eq(expect, got)
@pytest.mark.parametrize("fill_value", [[888, 999]])
def test_dataframe_with_nulls_where_with_scalars(fill_value):
pdf = pd.DataFrame(
{
"A": [-1, 2, -3, None, 5, 6, -7, 0],
"B": [4, -2, 3, None, 7, 6, 8, 0],
}
)
gdf = DataFrame.from_pandas(pdf)
expect = pdf.where(pdf % 3 == 0, fill_value)
got = gdf.where(gdf % 3 == 0, fill_value)
assert_eq(expect, got)
def test_dataframe_with_different_types():
# Testing for int and float
pdf = pd.DataFrame(
{"A": [111, 22, 31, 410, 56], "B": [-10.12, 121.2, 45.7, 98.4, 87.6]}
)
gdf = DataFrame.from_pandas(pdf)
expect = pdf.where(pdf > 50, -pdf)
got = gdf.where(gdf > 50, -gdf)
assert_eq(expect, got)
# Testing for string
pdf = pd.DataFrame({"A": ["a", "bc", "cde", "fghi"]})
gdf = DataFrame.from_pandas(pdf)
pdf_mask = pd.DataFrame({"A": [True, False, True, False]})
gdf_mask = DataFrame.from_pandas(pdf_mask)
expect = pdf.where(pdf_mask, ["cudf"])
got = gdf.where(gdf_mask, ["cudf"])
assert_eq(expect, got)
    # Testing for categorical
pdf = pd.DataFrame({"A": ["a", "b", "b", "c"]})
pdf["A"] = pdf["A"].astype("category")
gdf = DataFrame.from_pandas(pdf)
expect = pdf.where(pdf_mask, "c")
got = gdf.where(gdf_mask, ["c"])
assert_eq(expect, got)
def test_dataframe_where_with_different_options():
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
gdf = DataFrame.from_pandas(pdf)
# numpy array
boolean_mask = np.array([[False, True], [True, False], [False, True]])
expect = pdf.where(boolean_mask, -pdf)
got = gdf.where(boolean_mask, -gdf)
assert_eq(expect, got)
# with single scalar
expect = pdf.where(boolean_mask, 8)
got = gdf.where(boolean_mask, 8)
assert_eq(expect, got)
# with multi scalar
expect = pdf.where(boolean_mask, [8, 9])
got = gdf.where(boolean_mask, [8, 9])
assert_eq(expect, got)
def test_series_multiple_times_with_nulls():
sr = Series([1, 2, 3, None])
expected = Series([None, None, None, None], dtype=np.int64)
for i in range(3):
got = sr.replace([1, 2, 3], None)
assert_eq(expected, got)
# BUG: #2695
# The following series will acquire a chunk of memory and update with
# values, but these values may still linger even after the memory
# gets released. This memory space might get used for replace in
# subsequent calls and the memory used for mask may have junk values.
# So, if it is not updated properly, the result would be wrong.
# So, this will help verify that scenario.
Series([1, 1, 1, None])
@pytest.mark.parametrize("series_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"replacement", [128, 128.0, 128.5, 32769, 32769.0, 32769.5]
)
def test_numeric_series_replace_dtype(series_dtype, replacement):
psr = pd.Series([0, 1, 2, 3, 4, 5], dtype=series_dtype)
sr = Series.from_pandas(psr)
# Both Scalar
if sr.dtype.type(replacement) != replacement:
with pytest.raises(TypeError):
sr.replace(1, replacement)
else:
expect = psr.replace(1, replacement).astype(psr.dtype)
got = sr.replace(1, replacement)
assert_eq(expect, got)
# to_replace is a list, replacement is a scalar
if sr.dtype.type(replacement) != replacement:
with pytest.raises(TypeError):
sr.replace([2, 3], replacement)
else:
expect = psr.replace([2, 3], replacement).astype(psr.dtype)
got = sr.replace([2, 3], replacement)
assert_eq(expect, got)
# If to_replace is a scalar and replacement is a list
with pytest.raises(TypeError):
sr.replace(0, [replacement, 2])
# Both list of unequal length
with pytest.raises(ValueError):
sr.replace([0, 1], [replacement])
# Both lists of equal length
if (
np.dtype(type(replacement)).kind == "f" and sr.dtype.kind in {"i", "u"}
) or (sr.dtype.type(replacement) != replacement):
with pytest.raises(TypeError):
sr.replace([2, 3], [replacement, replacement])
else:
expect = psr.replace([2, 3], [replacement, replacement]).astype(
psr.dtype
)
got = sr.replace([2, 3], [replacement, replacement])
assert_eq(expect, got)
def test_replace_inplace():
data = np.array([5, 1, 2, 3, 4])
sr = Series(data)
psr = pd.Series(data)
sr_copy = sr.copy()
psr_copy = psr.copy()
assert_eq(sr, psr)
assert_eq(sr_copy, psr_copy)
sr.replace(5, 0, inplace=True)
psr.replace(5, 0, inplace=True)
assert_eq(sr, psr)
assert_eq(sr_copy, psr_copy)
sr = Series(data)
psr = pd.Series(data)
sr_copy = sr.copy()
psr_copy = psr.copy()
assert_eq(sr, psr)
assert_eq(sr_copy, psr_copy)
sr.replace({5: 0, 3: -5})
psr.replace({5: 0, 3: -5})
assert_eq(sr, psr)
assert_eq(sr_copy, psr_copy)
srr = sr.replace()
psrr = psr.replace()
assert_eq(srr, psrr)
psr = pd.Series(["one", "two", "three"], dtype="category")
sr = Series.from_pandas(psr)
sr_copy = sr.copy()
psr_copy = psr.copy()
assert_eq(sr, psr)
assert_eq(sr_copy, psr_copy)
sr.replace("one", "two", inplace=True)
psr.replace("one", "two", inplace=True)
assert_eq(sr, psr)
assert_eq(sr_copy, psr_copy)
pdf = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [5, 6, 7, 8, 9]})
gdf = DataFrame.from_pandas(pdf)
pdf_copy = pdf.copy()
gdf_copy = gdf.copy()
assert_eq(pdf, gdf)
assert_eq(pdf_copy, gdf_copy)
pdf.replace(5, 0, inplace=True)
gdf.replace(5, 0, inplace=True)
assert_eq(pdf, gdf)
assert_eq(pdf_copy, gdf_copy)
pds = pd.Series([1, 2, 3, 45])
gds = Series.from_pandas(pds)
vals = np.array([]).astype(int)
assert_eq(pds.replace(vals, -1), gds.replace(vals, -1))
pds.replace(vals, 77, inplace=True)
gds.replace(vals, 77, inplace=True)
assert_eq(pds, gds)
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 666]})
gdf = DataFrame.from_pandas(pdf)
assert_eq(
pdf.replace({"a": 2}, {"a": -33}), gdf.replace({"a": 2}, {"a": -33})
)
assert_eq(
pdf.replace({"a": [2, 5]}, {"a": [9, 10]}),
gdf.replace({"a": [2, 5]}, {"a": [9, 10]}),
)
assert_eq(
pdf.replace([], []), gdf.replace([], []),
)
assert_exceptions_equal(
lfunc=pdf.replace,
rfunc=gdf.replace,
lfunc_args_and_kwargs=([], {"to_replace": -1, "value": []}),
rfunc_args_and_kwargs=([], {"to_replace": -1, "value": []}),
compare_error_message=False,
)
@pytest.mark.parametrize(
("lower", "upper"),
[([2, 7.4], [4, 7.9]), ([2, 7.4], None), (None, [4, 7.9],)],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_clip(lower, upper, inplace):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5], "b": [7.1, 7.24, 7.5, 7.8, 8.11]}
)
gdf = DataFrame.from_pandas(pdf)
got = gdf.clip(lower=lower, upper=upper, inplace=inplace)
expect = pdf.clip(lower=lower, upper=upper, axis=1)
if inplace is True:
assert_eq(expect, gdf)
else:
assert_eq(expect, got)
@pytest.mark.parametrize(
("lower", "upper"), [("b", "d"), ("b", None), (None, "c"), (None, None)],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_category_clip(lower, upper, inplace):
data = ["a", "b", "c", "d", "e"]
    pdf = pd.DataFrame({"a": data})
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import print_function, unicode_literals
import numpy as np
import os
from pyiron.base.settings.generic import Settings
from mendeleev import element
import sys
import pandas
__author__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
pandas.options.mode.chained_assignment = None
class ChemicalElement(object):
"""
An Object which contains the element specific parameters
"""
def __init__(self, sub):
"""
Constructor: assign PSE dictionary to object
"""
self._dataset = None
self.sub = sub
self._mendeleev_element = None
self._mendeleev_property_lst = None
stringtypes = str
if isinstance(self.sub, stringtypes):
self._init_mendeleev(self.sub)
elif "Parent" in self.sub.index and isinstance(self.sub.Parent, stringtypes):
self._init_mendeleev(self.sub.Parent)
elif len(self.sub) > 0:
self._init_mendeleev(self.sub.Abbreviation)
self._mendeleev_translation_dict = {'AtomicNumber': 'atomic_number',
'AtomicRadius': 'covalent_radius_cordero',
'AtomicMass': 'mass',
'CovalentRadius': 'covalent_radius',
'DiscoveryYear': 'discovery_year',
'Group': 'group_id',
'Name': 'name',
'Period': 'period',
'StandardName': 'name',
'VanDerWaalsRadius': 'vdw_radius',
'MeltingPoint': 'melting_point',
'ElectronAffinity': 'electron_affinity'
}
self.el = None
def _init_mendeleev(self, element_str):
self._mendeleev_element = element(str(element_str))
self._mendeleev_property_lst = [s for s in dir(self._mendeleev_element) if not s.startswith('_')]
def __getattr__(self, item):
return self[item]
def __getitem__(self, item):
if item in self._mendeleev_translation_dict.keys():
item = self._mendeleev_translation_dict[item]
if item in self._mendeleev_property_lst:
return getattr(self._mendeleev_element, item)
if item in self.sub.index:
return self.sub[item]
def __eq__(self, other):
if isinstance(other, self.__class__):
conditions = list()
conditions.append(self.sub.to_dict() == other.sub.to_dict())
return all(conditions)
elif isinstance(other, (np.ndarray, list)):
conditions = list()
for sp in other:
conditions.append(self.sub.to_dict() == sp.sub.to_dict())
return any(conditions)
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
if self != other:
if self["AtomicNumber"] != other["AtomicNumber"]:
return self["AtomicNumber"] > other["AtomicNumber"]
else:
return self["Abbreviation"] > other["Abbreviation"]
else:
return False
def __ge__(self, other):
if self != other:
return self > other
else:
return True
def __hash__(self):
return hash(repr(self))
@property
def tags(self):
if "tags" not in self.sub.keys() or self.sub["tags"] is None:
return dict()
return self.sub["tags"]
def __dir__(self):
return list(self.sub.index) + super(ChemicalElement, self).__dir__()
def __str__(self):
return str([self._dataset, self.sub])
def add_tags(self, tag_dic):
"""
        Add tags to an existing element inside its specific pandas series without overwriting the old tags
Args:
tag_dic (dict): dictionary containing e.g. key = "spin" value = "up",
more than one tag can be added at once
"""
(self.sub["tags"]).update(tag_dic)
def to_hdf(self, hdf):
"""
saves the element with his parameters into his hdf5 job file
Args:
hdf (Hdfio): Hdfio object which will be used
"""
with hdf.open(self.Abbreviation) as hdf_el: # "Symbol of the chemical element"
# TODO: save all parameters that are different from the parent (e.g. modified mass)
if self.Parent is not None:
self._dataset = {"Parameter": ["Parent"], "Value": [self.Parent]}
hdf_el["elementData"] = self._dataset
with hdf_el.open(
"tagData"
) as hdf_tag: # "Dictionary of element tag static"
for key in self.tags.keys():
hdf_tag[key] = self.tags[key]
def from_hdf(self, hdf):
"""
loads an element with his parameters from the hdf5 job file and store it into its specific pandas series
Args:
hdf (Hdfio): Hdfio object which will be used to read a hdf5 file
"""
pse = PeriodicTable()
elname = self.sub.name
with hdf.open(elname) as hdf_el:
if "elementData" in hdf_el.list_nodes():
element_data = hdf_el["elementData"]
for key, val in zip(element_data["Parameter"], element_data["Value"]):
if key in "Parent":
self.sub = pse.dataframe.loc[val]
self.sub["Parent"] = val
self._init_mendeleev(val)
else:
self.sub["Parent"] = None
self._init_mendeleev(elname)
self.sub.name = elname
if "tagData" in hdf_el.list_groups():
with hdf_el.open(
"tagData"
) as hdf_tag: # "Dictionary of element tag static"
tag_dic = {}
for key in hdf_tag.list_nodes():
tag_dic[key] = hdf_tag[key]
self.sub["tags"] = tag_dic
class PeriodicTable(object):
"""
An Object which stores an elementary table which can be modified for the current session
"""
def __init__(self, file_name=None): # PSE_dat_file = None):
"""
Args:
file_name (str): Possibility to choose an source hdf5 file
"""
self.dataframe = self._get_periodic_table_df(file_name)
if "Abbreviation" not in self.dataframe.columns.values:
self.dataframe["Abbreviation"] = None
if not all(self.dataframe["Abbreviation"].values):
for item in self.dataframe.index.values:
if self.dataframe["Abbreviation"][item] is None:
self.dataframe["Abbreviation"][item] = item
self._parent_element = None
self.el = None
def __getattr__(self, item):
return self[item]
def __getitem__(self, item):
if item in self.dataframe.columns.values:
return self.dataframe[item]
if item in self.dataframe.index.values:
return self.dataframe.loc[item]
def from_hdf(self, hdf):
"""
loads an element with his parameters from the hdf5 job file by creating an Object of the ChemicalElement type.
The new element will be stored in the current periodic table.
Changes in the tags will also be modified inside the periodic table.
Args:
hdf (Hdfio): Hdfio object which will be used to read the data from a hdf5 file
Returns:
"""
elements = hdf.list_groups() # ["elements"]
for el in elements:
sub = pandas.Series()
new_element = ChemicalElement(sub)
new_element.sub.name = el
new_element.from_hdf(hdf)
new_element.sub["Abbreviation"] = el
if "sub_tags" in new_element.tags:
if not new_element.tags["sub_tags"]:
del new_element.tags["sub_tags"]
if new_element.Parent is None:
if not (el in self.dataframe.index.values):
raise AssertionError()
if len(new_element.sub["tags"]) > 0:
raise ValueError("Element cannot get tag-assignment twice")
if "tags" not in self.dataframe.keys():
self.dataframe["tags"] = None
self.dataframe["tags"][el] = new_element.tags
else:
self.dataframe = self.dataframe.append(new_element.sub)
self.dataframe["tags"] = self.dataframe["tags"].apply(
            lambda x: None if pandas.isnull(x) else x
        )
#v1.0
#v0.9 - All research graph via menu & mouse click
#v0.8 - Candlestick graphs
#v0.7 - Base version with all graphs and bug fixes
#v0.6
import pandas as pd
from pandas import DataFrame
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
class PrepareTestData():
def __init__(self, argFolder=None, argOutputSize='compact'):
super().__init__()
#argFolder='./scriptdata'
self.folder = argFolder + '/'
self.outputsize = argOutputSize.lower()
def loadDaily(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'daily_compact_'+argScript+'.csv'
else:
filename=self.folder + 'daily_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadIntra(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'intraday_5min_compact_'+argScript+'.csv'
else:
filename=self.folder + 'intraday_5min_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadSMA(self, argScript='', argPeriod=20):
try:
#if(argPeriod == 0):
# csvdf = pd.read_csv(self.folder + 'SMA_'+argScript+'.csv')
#else:
csvdf = pd.read_csv(self.folder + 'SMA_'+str(argPeriod)+ '_'+argScript+'.csv')
convert_type={'SMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_sma(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadEMA(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'EMA_'+argScript+'.csv')
convert_type={'EMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadVWMP(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'VWAP_'+argScript+'.csv')
convert_type={'VWAP':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadRSI(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'RSI_'+argScript+'.csv')
convert_type={'RSI':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadStochasticOscillator(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'STOCH_'+argScript+'.csv')
convert_type={'SlowD':float, 'SlowK':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadMACD(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'MACD_'+argScript+'.csv')
convert_type={'MACD':float, 'MACD_Hist':float, 'MACD_Signal':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
            csvdf = DataFrame()
        return csvdf
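# Usage sketch (not part of the original script): exercising the loaders above for a
# hypothetical ticker; assumes the matching CSV files already exist under ./scriptdata.
def _example_load_cached_data():
    ptd = PrepareTestData(argFolder='./scriptdata', argOutputSize='compact')
    daily = ptd.loadDaily('MSFT')
    sma20 = ptd.loadSMA('MSFT', argPeriod=20)
    return daily, sma20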
#!/usr/bin/env python
# coding: utf-8
import torch
import numpy as np
from sklearn import metrics
import pandas as pd
import torch.utils.data as Data
import sklearn
from sklearn import tree
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.preprocessing import QuantileTransformer
from xgboost import XGBClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
df = pd.read_csv("./beijing_cate2id.csv")
shape1 = df.shape[0]
baseline_train_df = df.iloc[0:int(0.6 * shape1)]
baseline_val_df = df.iloc[int(0.6 * shape1):int(0.8 * shape1)]
baseline_test_df = df.iloc[int(0.8 * shape1):]
qt = QuantileTransformer(output_distribution="normal").fit(df.loc[:, df.columns != 'scene'])
x_train = baseline_train_df.loc[:, baseline_train_df.columns != 'scene']
a = x_train.columns
x_train = qt.transform(x_train)
x_train = pd.DataFrame(x_train)
x_train.columns = a
y_train = pd.Categorical(baseline_train_df.scene).codes
x_test = baseline_test_df.loc[:, baseline_test_df.columns != 'scene']
a = x_test.columns
x_test = qt.transform(x_test)
x_test = pd.DataFrame(x_test)
x_test.columns = a
y_test = pd.Categorical(baseline_test_df.scene).codes
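# Sketch (not part of the original script): fitting one of the imported baseline
# classifiers on the transformed features and scoring it. The model choice and
# hyperparameters here are illustrative assumptions, not the author's final setup.
clf = RandomForestClassifier(n_estimators=200, random_state=0)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print('macro F1:', f1_score(y_test, y_pred, average='macro'))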
import json
import pandas as pd
import re
def clean_string(floatString):
return floatString.rstrip('0').rstrip('.')
def convert_frac(q):
match_list = re.findall(r'\d+/\d+', q)
for match_str in match_list:
new_str = clean_string(str(eval(match_str)))
q = q.replace(match_str, new_str)
return q
def pre_question(q):
q = convert_frac(q)
return q
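# Illustrative example (not from the original file): convert_frac() replaces each
# "a/b" fraction with its evaluated decimal value, so
#   pre_question("She ate 1/2 of the 3/4 pizza") -> "She ate 0.5 of the 0.75 pizza"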
def break_punctuate(q):
q = re.sub(r'([^\d])(\.)($|\s)', r'\1 \2\3', q)
q = re.sub(r'([^\s])([\,\?\'\"\:])', r'\1 \2', q)
q = re.sub(r'([\,\?\'\"\:])([^\s])', r'\1 \2', q)
return q
def convert_number(q):
match_list = re.findall(r"[-+]?\d*\.\d+|\d+", q)
num_list = []
# skip for safety
if 'number' in q:
return ' '.join(num_list), q
for i, match in enumerate(match_list):
q = q.replace(match, 'number_', 1)
s = match
if '.' not in s:
s = s + '.0'
num_list.append(s)
for i, match in enumerate(match_list):
q = q.replace('number_', 'number%i' % i, 1)
return ' '.join(num_list), q
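# Illustrative example (not from the original file): each literal number is swapped
# for an indexed placeholder and the numbers are returned separately.
def _example_convert_number():
    nums, templated = convert_number("John has 3 apples and 5.5 oranges")
    # nums == "3.0 5.5"; templated == "John has number0 apples and number1 oranges"
    return nums, templated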
def isnot_punc(word):
return word not in ['.', ',', '?']
def make_group_num(q):
words = q.split()
n = len(words)
group_num = []
for i, word in enumerate(words):
if 'number' in word:
for j in range(i - 1, -1, -1):
                if isnot_punc(words[j]):
if j not in group_num:
group_num.append(j)
break
if i not in group_num:
group_num.append(i)
for j in range(i + 1, n):
                if isnot_punc(words[j]):
if j not in group_num:
group_num.append(j)
break
last_num = 0
'''
for i in range(n-3, -1, -1):
if isnot_punc(words[i]):
last_num += 1
if i not in group_num:
group_num.append(i)
if last_num >= 3:
break
'''
# n-1: punctuate(maybe), n-2: last word, n-3: last-1 pos word
for i in range(n - 3, -1, -1):
last_num += 1
group_num.append(i)
if last_num >= 3:
break
group_num.sort()
return '%s' % group_num
def load_infer_data(path):
with open(path, "r", encoding='utf-8-sig') as json_file:
q_json = json.load(json_file)
n_question = len(q_json.keys())
    df = pd.DataFrame(columns=['Question', 'Numbers', 'group_nums', 'id'])
# basic imports
import math
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import numpy as np
import pandas as pd
import astropy
from astropy.table import Table
from scipy.interpolate import interp1d
from PyAstronomy import pyasl
# class imports
from mlfinder.bd import BrownDwarf
from mlfinder.fields import Fields
from mlfinder.mcmc import MonteCarlo
# class to check for events
class FindEvents():
def __init__(self, bd, fields, precision):
# check if bd and fields are classes
if not isinstance(bd, BrownDwarf):
raise Exception('Brown dwarf must be an instance of the BrownDwarf() class.')
if not isinstance(fields, Fields):
raise Exception('Fields must an instance of the Fields() class.')
# basic creation of class
self.bd = bd
self.fields = fields
self.stars = fields.stars
self.m_jup_prec = precision
self.coord_df = bd.coord_df
# some helpful values
self.theta_max = self.theta_max_calc()
self.events_per_year = self.events_per_year_calc()
# finding events
self.event_table = self.find_events()
##
# name: find_events
#
# inputs: bd and fields
# outputs: table of closest approaches
    # And now for the juicier stuff: the method that calls upon the functions above.
def find_events(self):
# path
coord_df = self.bd.coord_df
# ends of path
self.a_ends = [coord_df.ra[0], coord_df.ra[len(coord_df.ra) - 1]]
self.d_ends = [coord_df.dec[0], coord_df.dec[len(coord_df.dec) - 1]]
# add to df holding all closest dwarf and background star pairs to call on later.
close_df = self.close_stars(a_ends = self.a_ends, d_ends = self.d_ends)
return close_df
##
# Name: theta_max_calc
#
# inputs: data from the stars
# outputs: theta_max for that individual star
#
# purpose: calculate theta max for a given parallax for each brown dwarf. So I can use to include any dwarf-background star
# pairs that are under the theta max for astrometric microlensing. I didn't want to miss any.
#
def theta_max_calc(self):
# get parallax and astrometric precision.
parallax = float(self.bd.bd_cut['pi']) / 1000 # arcseconds
astro_precision = 0.2 #cushings example
#constants
big_g = 4.3 * math.pow(10, -3) #pc * solar_mass^-1 * (km/s)^2
c_squared = 9 * math.pow(10, 10) #(km/s)^2
d_l = 1 / parallax #in parsecs
        delta_ml = self.m_jup_prec * 9.548 * math.pow(10, -4) # mass uncertainty converted from Jupiter masses to solar masses
k = 8.144 # mas/solar masses
#actual formula. Used k one because was easiest.
theta_max = (k * delta_ml * parallax) / (astro_precision)
return theta_max
##
# Name: delta_ml_calc
#
# inputs: brown dwarf data, a theta
# outputs: a delta_ml by Cushing's formula
#
# purpose: when I have called this function, I have the smallest theta between dwarfs and background stars for each
# dwarf. So I need to find the delta_ml to see how helpful microlensing would be.
#
def delta_ml_calc(self, theta):
#get parallax and astrometric precision
parallax = float(self.bd.bd_cut['pi']) / 1000
astro_precision = 0.2 #mas, cushing's example
k = 8.144 #mas/solar masses
#using helpful formula
delta_ml = (theta * astro_precision) / (k * parallax)
delta_ml = delta_ml / (9.548 * math.pow(10, -4)) #making into jupiter masses
return delta_ml
##
# Name: events_per_year_calc
#
# inputs: brown dwarf data, background stars
# outputs: number of events per year
#
# purpose: previously wanted to know how many events per year should be occuring. I used it as a check for my microlensing
# events output. I may use it again, so I kept it.
#
def events_per_year_calc(self):
#calculate the number of expected microlensing events per year for a given brown dwarf
k = 8.144 #mas/solar masses
#get parallax and astrometric precision
# convert parallax to arcseconds
parallax = float(self.bd.bd_cut['pi']) / 1000
astro_precision = 0.2 #cushing's example
mu_a = float(self.bd.bd_cut['mu_alpha']) / 1000
mu_d = float(self.bd.bd_cut['mu_delta']) / 1000
mu = math.sqrt((mu_a ** 2) + (mu_d ** 2))
#formula from Cushing et al. I have delta_ml and delta_ml2 for two different Ml's (cushing and ours). Subsequently,
#I also have two events per year. I last used our delta_ml, so I return our number
sigma = len(self.stars) / (np.pi * ((self.fields.n_arcmin * 60) ** 2)) #the surface density of stars per arcsecond^2 (#stars / area of view with radius 5 degrees)
delta_ml = self.m_jup_prec * 9.548 * math.pow(10, -4) #solar mass of jupiter
number = 2 * k * parallax * sigma * (delta_ml / astro_precision)
return number, sigma
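    # Worked example (illustrative, hypothetical values): for parallax = 0.1", a field
    # density sigma = 0.01 stars/arcsec^2, the 0.2 mas precision used above and a 1 M_Jup
    # mass uncertainty (delta_ml ~ 9.548e-4 M_Sun), the formula gives
    #   number = 2 * 8.144 * 0.1 * 0.01 * (9.548e-4 / 0.2) ~= 7.8e-5 events per year.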
##
# Name: add_to_close
#
    # inputs: dataframe to add to, separation, time of separation, index of bs, ra of bs, dec of bs, mass uncertainty.
# outputs: updated dataframe
#
    # purpose: to add rows to the close-approach dataframe (close_df). There are two cases where I want to add (smallest theta after going through all the stars
# and if theta < theta_min)
def add_to_close(self, close_df, object_name, sep, delta_m, bd_ra, bd_dec, ls_id, bs_ra, bs_dec, mag, time_of_min):
# set up dictionary and add to df
value_dict = {'object_name': object_name,
'sep': sep,
'delta_m': delta_m,
'bd_ra': bd_ra,
'bd_dec': bd_dec,
'ls_id': ls_id,
'bs_ra': bs_ra,
'bs_dec': bs_dec,
'mag': mag,
'time_of_min': time_of_min
}
return close_df.append(value_dict, ignore_index=True)
##
# Name: close_stars
#
# inputs: ends of brown dwarf (to cut the stars looked at for nearness to save time),
# outputs: dictionary of dwarf and background star pairs that make the cut of closest and/or under theta_max
#
# purpose: to run close_star_find function and keep on adding to the radius until close_star_find can find a star with the
    # given radius. Basically, just increase the radius until close_star_find no longer returns None.
#
def close_stars(self, a_ends, d_ends):
        #continually increase the radius until we hit more stars; makes sure we get the closest star while keeping processing low
        close_df = pd.DataFrame()
import pandas as pd
import sqlite3
from datetime import datetime
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
import Stemmer # From pip module PyStemmer
from sklearn.model_selection import GridSearchCV
import numpy as np
from matplotlib import pyplot as plt
# Note: This file is based on the approach by <EMAIL> described here:
# https://investigate.ai/text-analysis/choosing-the-right-number-of-topics-for-a-scikit-learn-topic-model/
#
class GridSearchLDA:
def get_reviews(self, database):
print("Connecting to {} for reviews. Please be patient.".format(database))
cnx = sqlite3.connect(database)
reviews = pd.read_sql_query("select * from reviews", cnx)
cnx.close()
return reviews
def get_products(self, database):
print("Connecting to {} for products. Please be patient.".format(database))
cnx = sqlite3.connect(database)
        products = pd.read_sql_query("select asin, title, description from products", cnx)
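# Sketch (not from the original file, which is truncated here): based on the imports
# above and the cited tutorial, the grid search presumably proceeds roughly like this.
# The text handling and parameter grid are assumptions, not the author's final setup.
def _example_lda_grid_search(text_series):
    vectorizer = CountVectorizer(stop_words='english', max_df=0.95, min_df=2)
    dtm = vectorizer.fit_transform(text_series.astype(str))
    search = GridSearchCV(
        LatentDirichletAllocation(learning_method='online', random_state=0),
        param_grid={'n_components': [5, 10, 15, 20]},
        cv=3,
    )
    search.fit(dtm)
    return search.best_params_, search.best_score_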
from typing import Dict, List
from google.cloud import aiplatform
import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import json
import time
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# create a client instance of the library
elastic_client = Elasticsearch()
print('************************************')
#import and train word tokenizer
#import csv from github
# url = "https://raw.githubusercontent.com/Nawod/malicious_url_classifier_api/master/archive/url_train.csv"
# data = pd.read_csv(url)
data = pd.read_csv('archive/url_train.csv')
tokenizer = Tokenizer(num_words=10000, split=' ')
tokenizer.fit_on_texts(data['url'].values)
print('Tokenizer Loaded')
print('************************************')
#retrieve ELK values
def get_elk_nlp():
response = elastic_client.search(
index='nlp-log-1',
body={},
)
# print(type(response))
# nested inside the API response object
elastic_docs_nlp = response["hits"]["hits"]
traffics_n = elastic_docs_nlp
nlp_traffic = {}
#append data
for num, doc in enumerate(traffics_n):
traffic = doc["_source"]
for key, value in traffic.items():
if key == "@timestamp":
try:
nlp_traffic[key] = np.append(nlp_traffic[key], value)
except KeyError:
nlp_traffic[key] = np.array([value])
if key == "host":
try:
nlp_traffic[key] = np.append(nlp_traffic[key], value)
except KeyError:
nlp_traffic[key] = np.array([value])
if key == "uri":
try:
nlp_traffic[key] = np.append(nlp_traffic[key], value)
except KeyError:
nlp_traffic[key] = np.array([value])
return nlp_traffic
#text cleaning
def clean_text(df):
spec_chars = ["!",'"',"#","%","&","'","(",")",
"*","+",",","-",".","/",":",";","<",
"=",">","?","@","[","\\","]","^","_",
"`","{","|","}","~","–"]
for char in spec_chars:
df['url'] = df['url'].str.replace(char, ' ')
return df
#tokenize inputs
def token(text):
X = tokenizer.texts_to_sequences(pd.Series(text).values)
Y = pad_sequences(X, maxlen=200)
return Y
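# --- Editor's usage sketch (added, not original) ---------------------------
# clean_text() expects a dataframe with a 'url' column and token() returns a
# padded (n, 200) index matrix; the sample URL below is made up.
def _token_demo():
    sample = pd.DataFrame({'url': ['http://example.com/login.php?user=admin']})
    sample = clean_text(sample)            # punctuation replaced by spaces
    return token(sample['url'].iloc[0])    # numpy array of shape (1, 200)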
#Vertex AI API call to get a prediction
def predict_mal_url_api(
project: str ="511747508882",
endpoint_id: str ="2047431388407267328",
location: str = "us-central1",
instances: List = list
):
aiplatform.init(project=project, location=location)
endpoint = aiplatform.Endpoint(endpoint_id)
prediction = endpoint.predict(instances=instances)
return prediction
#malicious url classification
def url_predict(body):
    #retrieve the data
input_data = json.loads(body)['data']
embeded_text = token(input_data) #tokenize the data
list = embeded_text.tolist()
response = predict_mal_url_api(instances=list) #call vertex AI API
    prediction = response.predictions[0][0] #retrieve data
# print('prediction: ', prediction)
#set appropriate sentiment
if prediction < 0.5:
t_sentiment = 'bad'
elif prediction >= 0.5:
t_sentiment = 'good'
return { #return the dictionary for endpoint
"Label" : t_sentiment
}
#nlp models prediction
def nlp_model(df):
    print('Malicious URLs classifying_#####')
#text pre processing
new_df = df
new_df['url'] = new_df['host'].astype(str).values + new_df['uri'].astype(str).values
new_df = clean_text(new_df)
    #convert dataframe into an array
df_array = new_df[['url']].to_numpy()
# creating a blank series
label_array = pd.Series([])
for i in range(df_array.shape[0]):
# for i in range(0,10):
#create json requests
lists = df_array[i].tolist()
data = {'data':lists}
body = str.encode(json.dumps(data))
            #call the mal url function for classification
            pred_url = url_predict(body)
            #retrieve the outputs
output = str.encode(json.dumps(pred_url))
label2 = json.loads(output)['Label']
#insert labels to series
label_array[i] = label2
#inserting new column with labels
df.insert(2, "url_label", label_array)
return df
#index key values for mal url output
mal_url_keys = [ "@timestamp","ID","host","uri","url_label"]
def nlpFilterKeys(document):
return {key: document[key] for key in mal_url_keys }
# es_client = Elasticsearch(http_compress=True)
es_client = Elasticsearch([{'host': 'localhost', 'port': 9200}])
def nlp_doc_generator(df):
df_iter = df.iterrows()
for index, document in df_iter:
yield {
"_index": 'nlp_output',
"_type": "_doc",
"_id" : f"{document['ID']}",
"_source": nlpFilterKeys(document),
}
#raise StopIteration
#main loop
def main():
count = 1
while True:
print('Batch :', count)
#retrive data and convert to dataframe
print('Retrive the data batch from ELK_#####')
net_traffic = get_elk_nlp()
elk_df_nlp = | pd.DataFrame(net_traffic) | pandas.DataFrame |
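# --- Editor's sketch of the indexing step (assumed, not recovered) ---------
# The helpers import and nlp_doc_generator() above point at a standard bulk
# upload of the labelled dataframe into the 'nlp_output' index; whether main()
# invokes it exactly like this is an assumption.
def index_nlp_output(labelled_df):
    # Stream every labelled row into Elasticsearch in a single bulk request.
    helpers.bulk(es_client, nlp_doc_generator(labelled_df))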
import pandas as pd
def split_dataframe_rows(df, column_selectors, separator):
'''
df = pandas dataframe to split,
column_selectors = the columns containing the values to split
separator = the symbol used to perform the split
returns: a dataframe with each entry for the target column separated, with each element moved into a new row.
The values in the other columns are duplicated across the newly divided rows.
'''
# we need to keep track of the ordering of the columns
def _split_list_to_rows(row, row_accumulator, column_selector, row_delimiter):
split_rows = {}
max_split = 0
for column_selector in column_selectors:
split_row = row[column_selector].split(row_delimiter)
split_rows[column_selector] = split_row
if len(split_row) > max_split:
max_split = len(split_row)
for i in range(max_split):
new_row = row.to_dict()
for column_selector in column_selectors:
try:
new_row[column_selector] = split_rows[column_selector].pop(0)
except IndexError:
new_row[column_selector] = ''
row_accumulator.append(new_row)
new_rows = []
df.apply(_split_list_to_rows, axis=1, args=(new_rows, column_selectors, separator))
new_df = | pd.DataFrame(new_rows, columns=df.columns) | pandas.DataFrame |
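# --- Editor's usage sketch (added) -----------------------------------------
# Assuming the cut-off tail of split_dataframe_rows() returns new_df, a toy
# call looks like this; the example frame and separator are made up.
if __name__ == '__main__':
    toy = pd.DataFrame({'name': ['a', 'b'], 'tags': ['x;y;z', 'u;v']})
    print(split_dataframe_rows(toy, ['tags'], ';'))
    # 'tags' is split into one row per element; 'name' is duplicated across the new rows.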
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
    g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
| assert_frame_equal(move_df, cp) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 27 17:08:08 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
import utils
utils.start(__file__)
#==============================================================================
SUBMIT_FILE_PATH = '../output/1027-2.csv.gz'
COMMENT = '1026-1 + GalacticCut2'
EXE_SUBMIT = True
#DROP = ['f001_hostgal_photoz']
SEED = np.random.randint(9999)
np.random.seed(SEED)
print('SEED:', SEED)
NFOLD = 5
LOOP = 5
param = {
'objective': 'multiclass',
# 'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.5,
'subsample': 0.5,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
# =============================================================================
# def
# =============================================================================
classes_gal = [6, 16, 53, 65, 92, 99]
class_weight_gal = {6: 1,
16: 1,
53: 1,
65: 1,
92: 1,
99: 2}
classes_exgal = [15, 42, 52, 62, 64, 67, 88, 90, 95, 99]
class_weight_exgal = {15: 2,
42: 1,
52: 1,
62: 1,
64: 2,
67: 1,
88: 1,
90: 1,
95: 1,
99: 2}
def lgb_multi_weighted_logloss_gal(y_preds, train_data):
"""
@author olivier https://www.kaggle.com/ogrellier
https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data/code
multi logloss for PLAsTiCC challenge
"""
# class_weights taken from Giba's topic : https://www.kaggle.com/titericz
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
# with Kyle Boone's post https://www.kaggle.com/kyleboone
y_true = train_data.get_label()
y_p = y_preds.reshape(y_true.shape[0], len(classes_gal), order='F')
    # Transform y_true into dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
# Get the log for ones, .values is used to drop the index of DataFrames
# Exclude class 99 for now, since there is no class99 in the training set
# we gave a special process for that class
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weight_gal[k] for k in sorted(class_weight_gal.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False
def lgb_multi_weighted_logloss_exgal(y_preds, train_data):
"""
@author olivier https://www.kaggle.com/ogrellier
https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data/code
multi logloss for PLAsTiCC challenge
"""
# class_weights taken from Giba's topic : https://www.kaggle.com/titericz
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
# with Kyle Boone's post https://www.kaggle.com/kyleboone
y_true = train_data.get_label()
y_p = y_preds.reshape(y_true.shape[0], len(classes_exgal), order='F')
    # Transform y_true into dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
# Get the log for ones, .values is used to drop the index of DataFrames
# Exclude class 99 for now, since there is no class99 in the training set
# we gave a special process for that class
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weight_exgal[k] for k in sorted(class_weight_exgal.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False
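# --- Editor's illustration (not part of the original script) ---------------
# A tiny self-contained numeric example of the weighting scheme used by the
# two feval functions above: 3 samples, 2 classes, class weights [1, 2].
def _wloss_example():
    y_ohe = np.array([[1, 0], [0, 1], [0, 1]], dtype=float)
    y_p = np.clip(np.array([[0.8, 0.2], [0.3, 0.7], [0.4, 0.6]]), 1e-15, 1 - 1e-15)
    y_log_ones = np.sum(y_ohe * np.log(y_p), axis=0)   # summed log-prob of the true class, per class
    nb_pos = y_ohe.sum(axis=0)                         # positives per class
    class_arr = np.array([1.0, 2.0])
    y_w = y_log_ones * class_arr / nb_pos
    return -np.sum(y_w) / np.sum(class_arr)            # ~0.36 for these numbers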
# =============================================================================
# load
# =============================================================================
files_tr = sorted(glob('../data/train_f*.pkl'))
[print(f) for f in files_tr]
X = pd.concat([
pd.read_pickle(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y = utils.load_target().target
if X.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')
gc.collect()
# =============================================================================
# cv(galactic)
# =============================================================================
print('==== CV galactic ====')
y_gal = y.copy()
y_gal.loc[~y.isin(classes_gal)] = 99
target_dict_gal = {}
target_dict_r_gal = {}
for i,e in enumerate(y_gal.sort_values().unique()):
target_dict_gal[e] = i
target_dict_r_gal[i] = e
y_gal = y_gal.replace(target_dict_gal)
param['num_class'] = i+1
dtrain = lgb.Dataset(X, y_gal,
#categorical_feature=CAT,
free_raw_data=False)
gc.collect()
model_all = []
nround_mean = 0
wloss_list = []
for i in range(LOOP):
gc.collect()
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD,
feval=lgb_multi_weighted_logloss_gal,
early_stopping_rounds=100, verbose_eval=50,
seed=SEED)
model_all += models
nround_mean += len(ret['multi_logloss-mean'])
wloss_list.append( ret['wloss-mean'][-1] )
nround_mean = int((nround_mean/LOOP) * 1.3)
result = f"CV wloss: {np.mean(wloss_list)} + {np.std(wloss_list)}"
print(result)
imp = ex.getImp(model_all)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}_gal.csv', index=False)
png = f'LOG/imp_{__file__}_gal.png'
utils.savefig_imp(imp, png, x='total', title=f'{__file__}_gal')
utils.send_line(result, png)
COL_gal = imp[imp.gain>0].feature.tolist()
# =============================================================================
# model(galactic)
# =============================================================================
dtrain = lgb.Dataset(X, y_gal,
#categorical_feature=CAT,
free_raw_data=False)
gc.collect()
np.random.seed(SEED)
model_all = []
for i in range(LOOP):
print('building', i)
gc.collect()
param['seed'] = np.random.randint(9999)
model = lgb.train(param, dtrain, num_boost_round=nround_mean, valid_sets=None,
valid_names=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, evals_result=None,
verbose_eval=True, learning_rates=None,
keep_training_booster=False, callbacks=None)
model_all.append(model)
del dtrain; gc.collect()
model_all_gal = model_all
# =============================================================================
# cv(extragalactic)
# =============================================================================
print('==== CV extragalactic ====')
y_exgal = y.copy()
y_exgal.loc[~y.isin(classes_exgal)] = 99
target_dict_exgal = {}
target_dict_r_exgal = {}
for i,e in enumerate(y_exgal.sort_values().unique()):
target_dict_exgal[e] = i
target_dict_r_exgal[i] = e
y_exgal = y_exgal.replace(target_dict_exgal)
param['num_class'] = i+1
dtrain = lgb.Dataset(X, y_exgal, #categorical_feature=CAT,
free_raw_data=False)
gc.collect()
model_all = []
nround_mean = 0
wloss_list = []
for i in range(LOOP):
gc.collect()
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD,
feval=lgb_multi_weighted_logloss_exgal,
early_stopping_rounds=100, verbose_eval=50,
seed=SEED)
model_all += models
nround_mean += len(ret['multi_logloss-mean'])
wloss_list.append( ret['wloss-mean'][-1] )
nround_mean = int((nround_mean/LOOP) * 1.3)
result = f"CV wloss: {np.mean(wloss_list)} + {np.std(wloss_list)}"
print(result)
imp = ex.getImp(model_all)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}_exgal.csv', index=False)
png = f'LOG/imp_{__file__}_exgal.png'
utils.savefig_imp(imp, png, x='total', title=f'{__file__}_exgal')
utils.send_line(result, png)
COL_exgal = imp[imp.gain>0].feature.tolist()
# =============================================================================
# model(extragalactic)
# =============================================================================
dtrain = lgb.Dataset(X, y_exgal, #categorical_feature=CAT,
free_raw_data=False)
gc.collect()
np.random.seed(SEED)
model_all = []
for i in range(LOOP):
print('building', i)
gc.collect()
param['seed'] = np.random.randint(9999)
model = lgb.train(param, dtrain, num_boost_round=nround_mean, valid_sets=None,
valid_names=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, evals_result=None,
verbose_eval=True, learning_rates=None,
keep_training_booster=False, callbacks=None)
model_all.append(model)
model_all_exgal = model_all
del dtrain; gc.collect()
# =============================================================================
# test
# =============================================================================
files_te = sorted(glob('../data/test_f*.pkl'))
X_test = pd.concat([
pd.read_pickle(f) for f in tqdm(files_te, mininterval=60)
], axis=1)
X_test_gal = X_test[X_test['f001_hostgal_photoz'] == 0][COL_gal]
X_test_exgal = X_test[X_test['f001_hostgal_photoz'] != 0][COL_exgal]
del X_test; gc.collect()
# gal
for i,model in enumerate(tqdm(model_all_gal)):
y_pred = model.predict(X_test_gal)
if i==0:
y_pred_all_gal = y_pred
else:
y_pred_all_gal += y_pred
y_pred_all_gal /= len(model_all_gal)
y_pred_all_gal = | pd.DataFrame(y_pred_all_gal) | pandas.DataFrame |
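# --- Editor's sketch of the step that typically follows (assumed) ----------
# Average the extragalactic boosters the same way, then map the integer column
# indices of both frames back to class labels via the target_dict_r_* lookups
# built during training; none of this is recovered from the original script.
y_pred_all_exgal = np.mean([m.predict(X_test_exgal) for m in model_all_exgal], axis=0)
y_pred_all_exgal = pd.DataFrame(y_pred_all_exgal)
y_pred_all_gal.columns = [f'class_{target_dict_r_gal[c]}' for c in y_pred_all_gal.columns]
y_pred_all_exgal.columns = [f'class_{target_dict_r_exgal[c]}' for c in y_pred_all_exgal.columns]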
import json
import requests as rq
import pandas as pd
def retrieve_fct(params:tuple):
''' Formats the met.no weather api 2.0 url with the coordinates (lat, lon and alt) and retrieves
the api connection.
Args:
        params (dict): dict with 'lat', 'lon' and 'altitude' values (the URL is filled from params.values())
Returns:
The api connection response object
'''
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
url = 'https://api.met.no/weatherapi/locationforecast/2.0/complete?lat={}&lon={}&altitude={}'
# Insert params in the url
r_url = url.format(*params.values())
# Request connect
r = rq.get(r_url, headers = headers)
return r
def save_json(r, filename:str):
''' Writes the JSON file with forecasts (RAW).
Args:
r (object): requests.models.Response, The api connection response object, returned by retrieve_fct().
filename (str): name of the json forecast file to be saved, generally "place.json".
'''
# Read forecast as json
data = r.json()
# Writes forecast as Json
with open(filename, 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def open_json_file(filename:str):
''' Reads and returns a JSON file.
Args:
filename (str): name of the json forecast file to be read, generally "place.json".
Returns:
data (dict): parsed JSON file in as a dict object.
'''
with open(filename) as json_file:
data = json.load(json_file)
return data
def create_df_from_JSON(data_file):
''' Selects relevant information ['air_temperature', 'cloud_area_fraction', 'relative_humidity']
from JSON and returns a pandas dataframe.
Args:
data_file (dict): parsed JSON file in as a dict object. In it there are the forecasts of an specific location.
Returns:
df (pandas.DataFrame): DF with the columns ['date', 'time', 'air_temperature', 'cloud_area_fraction',
'relative_humidity']
'''
# Generate a list of timepoints
timepoints = data_file['properties']['timeseries']
# Create a pandas df with instant timepoints
keys = ['air_temperature', 'cloud_area_fraction', 'relative_humidity']
df = pd.DataFrame(columns=['date','time']+keys)
# Iterate through timepoints in order to retrieve details
for tpt in timepoints:
# Format datetime
datetime = tpt['time'].replace('Z','').split('T')
# Create list of datetime and details (air temperature, cloud area fraction and relative humidity)
details = [tpt['data']['instant']['details'][i] for i in keys]
data = [datetime + list(details)]
# Try to append the timepoint to the dataframe, otherwise print the error
try:
df_tmp = | pd.DataFrame(data = data,columns=['date','time']+keys) | pandas.DataFrame |
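# --- Editor's end-to-end usage sketch (added) -------------------------------
# create_df_from_JSON() is cut off above; it is assumed to concat each df_tmp
# into df and return it. Coordinates and the file name below are made up.
if __name__ == '__main__':
    params = {'lat': 59.91, 'lon': 10.75, 'altitude': 90}
    save_json(retrieve_fct(params), 'oslo.json')
    forecast = create_df_from_JSON(open_json_file('oslo.json'))
    print(forecast.head())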
# coding: utf-8
import pandas as pd
import numpy as np
# load the dataset
papers_data = pd.read_csv('../../../Downloads/papers.csv')
# removing papers whose abstract is missing
papers = papers_data.replace('Abstract Missing', np.nan)  # use a real NaN so the dropna() below actually drops these rows
dataset = papers.dropna()
dataset.index = np.arange(0, len(dataset))
""" Preliminary text exploration """
# Fetch word count for each abstract
dataset['word_count'] = dataset["abstract"].apply(lambda x: len(str(x).split(" ")))
# Descriptive statistics of word counts
dataset['word_count'].describe()
# Identify common words (join with a space so words from adjacent abstracts are not glued together)
common_words = pd.Series(' '.join(dataset['abstract']).split()).value_counts()
# Identify uncommon words
uncommon_words = pd.Series(' '.join(dataset['abstract']).split()).value_counts()[-20:]
""" Text Pre-processing """
# --- objective ----
# text clean-up
# shrinking the vocab to retain only important words
# reduce sparsity
# --- task ----
# noise reduction
# normalization 1) stemming 2) lemmatization
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import regexp_tokenize
stem = PorterStemmer()
lem = WordNetLemmatizer()
# word = "dries"
# print("stemming:", stem.stem(word))
# print("lemmatisation:", lem.lemmatize(word, "v"))
# removing stopwords
stop_words = set(stopwords.words("english"))
# list of custom stop words: common words that occur more than 1000 times
custom_words = list(common_words.index[common_words > 1000])
# combined stopwords
stop_words = stop_words.union(custom_words)
# cleaning and normalizing text corpus of data
corpus = []
dataset_length = dataset.__len__()
for i in range(0, dataset_length):
# remove punctuation
text = re.sub('[^a-zA-Z]', ' ', dataset['abstract'][i])
# convert to lowercase
text = text.lower()
# remove tags
text = re.sub("</?.*?>", " <> ", text)
# remove special characters and digits
text = re.sub("(\\d|\\W)+", " ", text)
## convert to list from string
text = text.split()
    ## stemming and lemmatization
text = [lem.lemmatize(word) for word in text if not word in stop_words]
text = " ".join(text)
corpus.append(text)
""" Data Exploration """
# word count
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
wordcloud = WordCloud(background_color="white", stopwords=stop_words, max_words=100,
max_font_size=50, random_state=42).generate(str(corpus))
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
fig.savefig("nips_word.png", dpi=900)
""" Text preparation """
# bag of word approach: considers word frequencies
from sklearn.feature_extraction.text import CountVectorizer
import re
cv = CountVectorizer(max_df=0.8, stop_words=stop_words, max_features=10000, ngram_range=(1,3))
X = cv.fit_transform(corpus)
# length of top 10 vocabulary
# list(cv.vocabulary_.keys())[:10]
""" Visualize top N uni-grams, bi-grams & tri-grams """
# Most frequently occurring words
def get_top_n_words(corpus, n=None, ngram=1):
vec = CountVectorizer(ngram_range=(ngram, ngram), max_features=2000).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
## uni-gram
# Convert most freq words to dataframe for plotting bar plot
top_words = get_top_n_words(corpus, n=20)
top_df = pd.DataFrame(top_words)
top_df.columns=["Word", "Freq"]
# bar plot of most freq words
import seaborn as sns
sns.set(rc={'figure.figsize': (13,8)})
g = sns.barplot(x="Word", y="Freq", data=top_df)
g.set_xticklabels(g.get_xticklabels(), rotation=30)
## bi-gram
# Convert most freq words to dataframe for plotting bar plot
top2_words = get_top_n_words(corpus, n=20, ngram=2)
top2_df = pd.DataFrame(top2_words)
top2_df.columns=["Bi-gram", "Freq"]
# bar plot of most freq words
import seaborn as sns
sns.set(rc={'figure.figsize': (13,8)})
h = sns.barplot(x="Bi-gram", y="Freq", data=top2_df)
h.set_xticklabels(h.get_xticklabels(), rotation=45)
## tri-gram
# Convert most freq words to dataframe for plotting bar plot
top3_words = get_top_n_words(corpus, n=20, ngram=3)
top3_df = | pd.DataFrame(top3_words) | pandas.DataFrame |
import os
import pandas as pd
import numpy
tweets_path = './data/twitter_data/'
processed_path = './data/processed_data/'
files = os.listdir(tweets_path)
def process(store, category):
df = store[category]
df = resample(df)
df = calculate_polarity(df)
return df
def resample(df):
df.index = [(t//3600) * 3600 for t in df.index]
df = df.groupby(df.index).mean()
return df
def calculate_polarity(df):
df['pol'] = numpy.sqrt(df['pos'] * df['neg'])
return df
def save_data(df, category):
store = | pd.HDFStore(processed_path + category + '.h5') | pandas.HDFStore |
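# --- Editor's sketch of the missing tail (assumed, not recovered) -----------
# save_data() stops above right after opening the HDFStore, and the driver
# loop over `files` is absent; one plausible completion using the module's own
# names:
def save_data_sketch(df, category):
    # Write the processed frame under its category key and close the store.
    with pd.HDFStore(processed_path + category + '.h5') as store:
        store.put(category, df)

def run_all_sketch():
    for fname in files:
        with pd.HDFStore(tweets_path + fname) as store:
            for key in store.keys():              # keys look like '/category'
                save_data_sketch(process(store, key), key.lstrip('/'))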
import pytest
import numpy as np
import pandas as pd
from cobra.preprocessing import CategoricalDataProcessor
class TestCategoricalDataProcessor:
def test_attributes_to_dict(self):
processor = CategoricalDataProcessor()
cleaned_categories = ["a", "b", "c"]
processor._cleaned_categories_by_column = {
"variable": set(cleaned_categories)
}
actual = processor.attributes_to_dict()
expected = {
"model_type": "classification",
"regroup": True,
"regroup_name": "Other",
"keep_missing": True,
"category_size_threshold": 5,
"p_value_threshold": 0.001,
"scale_contingency_table": True,
"forced_categories": {},
"_cleaned_categories_by_column": {
"variable": list(set(cleaned_categories))
}
}
assert actual == expected
@pytest.mark.parametrize("attribute",
["regroup", "regroup_name", "keep_missing",
"category_size_threshold", "p_value_threshold",
"scale_contingency_table", "forced_categories",
"_cleaned_categories_by_column"])
def test_set_attributes_from_dict(self, attribute):
processor = CategoricalDataProcessor()
cleaned_categories = ["a", "b", "c"]
params = {
"regroup": True,
"regroup_name": "Other",
"keep_missing": True,
"category_size_threshold": 5,
"p_value_threshold": 0.001,
"scale_contingency_table": True,
"forced_categories": {},
"_cleaned_categories_by_column": {
"variable": cleaned_categories
}
}
expected = params[attribute]
if attribute == "_cleaned_categories_by_column":
# list is transformed to a set in CategoricalDataProcessor
expected = {"variable": set(cleaned_categories)}
processor.set_attributes_from_dict(params)
actual = getattr(processor, attribute)
assert actual == expected
@pytest.mark.parametrize("scale_contingency_table, expected",
[(False, 0.01329),
(True, 0.43437)])
def test_compute_p_value_classification(self, scale_contingency_table, expected):
X = pd.Series(data=(["c1"]*70 + ["c2"]*20 + ["c3"]*10))
y = pd.Series(data=([0]*35 + [1]*35 + [0]*15 + [1]*5 + [0]*8 + [1]*2))
category = "c1"
actual = (CategoricalDataProcessor
._compute_p_value(X, y, category, "classification", scale_contingency_table))
assert pytest.approx(actual, abs=1e-5) == expected
@pytest.mark.parametrize("seed, expected",
[(505, 0.02222),
(603, 0.89230)])
def test_compute_p_value_regression(self, seed, expected):
np.random.seed(seed)
X = pd.Series(data=(["c1"]*70 + ["c2"]*20 + ["c3"]*10))
y = pd.Series(data=np.random.uniform(0, 1, 100)*5)
category = "c1"
actual = (CategoricalDataProcessor
._compute_p_value(X, y, category, "regression", None))
assert pytest.approx(actual, abs=1e-5) == expected
def test_get_small_categories(self):
data = pd.Series(data=(["c1"]*50 + ["c2"]*25 + ["c3"]*15 + ["c4"]*5))
incidence = 0.35
        threshold = 10 # to make it easy to manually compute
expected = {"c3", "c4"}
actual = (CategoricalDataProcessor
._get_small_categories(data, incidence, threshold))
assert actual == expected
def test_replace_missings(self):
data = pd.DataFrame({"variable": ["c1", "c2", np.nan, "", " "]})
expected = pd.DataFrame({"variable": ["c1", "c2", "Missing", "Missing",
"Missing"]
})
actual = (CategoricalDataProcessor
._replace_missings(data, ["variable"]))
pd.testing.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("cleaned_categories, expected",
[({"c1", "c2"},
pd.Series(data=["c1", "c2", "Other", "Other"])),
({"c1", "c2", "c3", "c4"},
pd.Series(data=["c1", "c2", "c3", "c4"]))])
def test_replace_categories(self, cleaned_categories, expected):
data = pd.Series(data=["c1", "c2", "c3", "c4"])
actual = (CategoricalDataProcessor
._replace_categories(data, cleaned_categories, 'Other'))
pd.testing.assert_series_equal(actual, expected)
def test_all_cats_not_significant(self):
# Expected
e = {'categorical_var': ['A', 'A', 'A', 'A',
'B', 'B', 'B', 'B',
'C', 'C', 'C', 'C'],
'target': [1, 1, 1, 1,
0, 0, 0, 0,
1, 0, 1, 0],
'categorical_var_processed': ['A', 'A', 'A', 'A',
'B', 'B', 'B', 'B',
'C', 'C', 'C', 'C']}
# data -> actual
d = {'categorical_var': ['A', 'A', 'A', 'A',
'B', 'B', 'B', 'B',
'C', 'C', 'C', 'C'],
'target': [1, 1, 1, 1,
0, 0, 0, 0,
1, 0, 1, 0]}
discrete_vars = ['categorical_var']
target_column_name = 'target'
data = pd.DataFrame(d, columns=['categorical_var', 'target'])
expected = pd.DataFrame(e, columns=['categorical_var',
'target',
'categorical_var_processed'])
categorical_data_processor = CategoricalDataProcessor(
category_size_threshold=0,
p_value_threshold=0.0001)
categorical_data_processor.fit(data,
discrete_vars,
target_column_name)
actual = categorical_data_processor.transform(data,
discrete_vars)
| pd.testing.assert_frame_equal(actual, expected) | pandas.testing.assert_frame_equal |
import pandas as pd
import glob as glob
# **Introduction**
# <NAME>
#
# The dataset from MS Birka Stockholm is in .xls Excel-97 format.
# And the data was gathered in several steps during three different trips.
# Some of the data is overlapping in time-index, and same headers (data points) exist in several files.
# So to be able to filter and consolidate all the data it must be done in several steps.
# As the Excel-97 format is limited in 65k rows and also a limited amount of columns it was needed to
# divide into several files.
#
# Some of the data is in Boolean format, and some have data-points missing but
# the majority should be in numerical format.
#
# In all Excel-files the meta data of each data-point (header) is in the first 14 rows.
# The first step is to make a pre-processing of the .xls files, and filter out non uni-code characters,
# put in a split character between the meta-data and joining everything in the data header.
# Still keeping the index in time-series format.
#
# In[7]:
csv_data_path = '/Users/fredde/Database/csv-1year/'
xls_data_path = '/Users/fredde/Database/data-files/1year/'
database_path = '/Users/fredde/Database/'
xlsfiles = glob.glob(xls_data_path + '*.xls')
print(xlsfiles)
df = pd.DataFrame()
all_data = | pd.DataFrame() | pandas.DataFrame |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import _MissingPandasLikeIndex, _MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D', periods=10)),
pd.DataFrame(np.random.randn(10, 5),
columns=list('abcde')).set_index(['a', 'b'])]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = 'databricks'
expected_error_message = ("'Index' object has no attribute '{}'".format(item))
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = 'databricks'
expected_error_message = ("'MultiIndex' object has no attribute '{}'".format(item))
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a'))
        # FIXME: the index values do not reflect the change. (#1190)
# self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index('b', append=True).index
kidx = self.kdf.set_index('b', append=True).index
with self.sql_conf({'spark.sql.execution.arrow.enabled': False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a'))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = 'a'
kidx.name = 'a'
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name='x')), repr(pidx.to_frame(name='x')))
self.assert_eq(repr(kidx.to_frame(index=False, name='x')),
repr(pidx.to_frame(index=False, name='x')))
pidx = self.pdf.set_index('b', append=True).index
kidx = self.kdf.set_index('b', append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name=['x', 'y'])),
repr(pidx.to_frame(name=['x', 'y'])))
self.assert_eq(repr(kidx.to_frame(index=False, name=['x', 'y'])),
repr(pidx.to_frame(index=False, name=['x', 'y'])))
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
pdf = pd.DataFrame(np.random.randn(10, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = 'renamed'
kidx.name = 'renamed'
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = 'hi'
expected_error_message = ("Length of new names must be {}, got {}"
.format(len(kdf._internal.index_map), len(['0', '1'])))
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ['0', '1']
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ['renamed_number', 'renamed_color']
kidx.names = ['renamed_number', 'renamed_color']
self.assertEqual(kidx.names, pidx.names)
pidx.names = ['renamed_number', None]
kidx.names = ['renamed_number', None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion('2.4'):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({'spark.sql.execution.arrow.enabled': False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = 'renamed'
def test_index_rename(self):
pdf = pd.DataFrame(np.random.randn(10, 5),
index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x'))
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename('y'), pidx.rename('y'))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename('z', inplace=True)
pidx.rename('z', inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
pmidx = pdf.index
kmidx = kdf.index
self.assert_eq(kmidx.rename(['n', 'c']), pmidx.rename(['n', 'c']))
self.assert_eq(kdf.index.names, pdf.index.names)
kmidx.rename(['num', 'col'], inplace=True)
pmidx.rename(['num', 'col'], inplace=True)
self.assert_eq(kmidx, pmidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))
self.assert_eq(kdf.index.names, pdf.index.names)
self.assertRaises(TypeError, lambda: kmidx.rename('number'))
self.assertRaises(ValueError, lambda: kmidx.rename(['number']))
def test_multi_index_levshape(self):
pidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])
self.assertEqual(pidx.levshape, kidx.levshape)
def test_index_unique(self):
kidx = self.kdf.index
# here the output is different than pandas in terms of order
expected = [0, 1, 3, 5, 6, 8, 9]
self.assert_eq(expected, sorted(kidx.unique().to_pandas()))
self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))
expected = [1, 2, 4, 6, 7, 9, 10]
self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))
with self.assertRaisesRegexp(IndexError, "Too many levels*"):
kidx.unique(level=1)
with self.assertRaisesRegexp(KeyError, "Requested level (hi)*"):
kidx.unique(level='hi')
def test_multi_index_copy(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index.copy(), pdf.index.copy())
def test_index_symmetric_difference(self):
idx = ks.Index(['a', 'b', 'c'])
midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
with self.assertRaisesRegexp(NotImplementedError, "Doesn't support*"):
idx.symmetric_difference(midx)
def test_multi_index_symmetric_difference(self):
idx = ks.Index(['a', 'b', 'c'])
midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
midx_ = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
self.assert_eq(
midx.symmetric_difference(midx_),
midx.to_pandas().symmetric_difference(midx_.to_pandas()))
with self.assertRaisesRegexp(NotImplementedError, "Doesn't support*"):
midx.symmetric_difference(idx)
def test_missing(self):
kdf = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
# Index functions
missing_functions = inspect.getmembers(_MissingPandasLikeIndex, inspect.isfunction)
unsupported_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'unsupported_function']
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index('a').index, name)()
deprecated_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'deprecated_function']
for name in deprecated_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"method.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index('a').index, name)()
# MultiIndex functions
missing_functions = inspect.getmembers(_MissingPandasLikeMultiIndex, inspect.isfunction)
unsupported_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'unsupported_function']
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)()
deprecated_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'deprecated_function']
for name in deprecated_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"method.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)()
# Index properties
missing_properties = inspect.getmembers(_MissingPandasLikeIndex,
lambda o: isinstance(o, property))
unsupported_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'unsupported_property']
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index('a').index, name)
deprecated_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'deprecated_property']
for name in deprecated_properties:
with self.assertRaisesRegex(PandasNotImplementedError,
"property.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index('a').index, name)
# MultiIndex properties
missing_properties = inspect.getmembers(_MissingPandasLikeMultiIndex,
lambda o: isinstance(o, property))
unsupported_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'unsupported_property']
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)
deprecated_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'deprecated_property']
for name in deprecated_properties:
with self.assertRaisesRegex(PandasNotImplementedError,
"property.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)
def test_index_has_duplicates(self):
indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)]
names = [None, 'ks', 'ks', None]
has_dup = [False, True, True, False]
for idx, name, expected in zip(indexes, names, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multiindex_has_duplicates(self):
indexes = [[list("abc"), list("edf")], [list("aac"), list("edf")],
[list("aac"), list("eef")], [[1, 4, 4], [4, 6, 6]]]
has_dup = [False, False, True, True]
for idx, expected in zip(indexes, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multi_index_not_supported(self):
kdf = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
with self.assertRaisesRegex(TypeError,
"cannot perform any with this index type"):
kdf.set_index(['a', 'b']).index.any()
with self.assertRaisesRegex(TypeError,
"cannot perform all with this index type"):
kdf.set_index(['a', 'b']).index.all()
def test_index_nlevels(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index= | pd.Index(['a', 'b', 'c']) | pandas.Index |
# -*- coding: utf-8 -*-
#%%
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_iris
#%%
iris = load_iris()
col_X = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth']
col_Y = ['Species']
iris_X = pd.DataFrame(iris.data, columns=col_X)
iris_Y = pd.DataFrame(iris.target, columns=col_Y)
iris_data = pd.concat([iris_X, iris_Y], axis=1)
#%%
###Dataset Separation###
def BuildDataSet(data, X, Y, frac, keys = None) :
if keys != None :
data.index = data[keys]
X_train = | pd.DataFrame() | pandas.DataFrame |
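# --- Editor's sketch (the body of BuildDataSet is cut off above) ------------
# One common way to honour `frac` is pandas' sample/drop split; the behaviour
# below is assumed, not recovered from the original notebook.
def build_dataset_sketch(data, X, Y, frac, keys=None):
    if keys is not None:
        data = data.set_index(keys)
    train = data.sample(frac=frac, random_state=0)
    test = data.drop(train.index)
    return train[X], train[Y], test[X], test[Y]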
from collections import (
Counter,
) # Count how often each word occurs within the list
from sklearn.linear_model import LinearRegression
import pandas as pd
import numpy as np
import math
import seaborn as sns
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.graphics.regressionplots import *
# from year to decade
def etichetta(row):
anno = row["anno"].strip()
if anno in (
"2010",
"2011",
"2012",
"2013",
"2014",
"2015",
"2016",
"2017",
"2018",
"2019",
"2020",
"2021",
):
return "2010-2021"
elif anno in (
"2000",
"2001",
"2002",
"2003",
"2004",
"2005",
"2006",
"2007",
"2008",
"2009",
):
return "2000-2009"
elif anno in (
"1990",
"1991",
"1992",
"1993",
"1994",
"1995",
"1996",
"1997",
"1998",
"1999",
):
return "1990-1999"
elif anno in (
"1980",
"1981",
"1982",
"1983",
"1984",
"1985",
"1986",
"1987",
"1988",
"1989",
):
return "1980-1989"
elif anno in (
"1970",
"1971",
"1972",
"1973",
"1974",
"1975",
"1976",
"1977",
"1978",
"1979",
):
return "1970-1979"
elif anno in (
"1960",
"1961",
"1962",
"1963",
"1964",
"1965",
"1966",
"1967",
"1968",
"1969",
):
return "1960-1969"
elif anno in (
"1950",
"1951",
"1952",
"1953",
"1954",
"1955",
"1956",
"1957",
"1958",
"1959",
):
return "1950-1959"
elif anno in (
"1940",
"1941",
"1942",
"1943",
"1944",
"1945",
"1946",
"1947",
"1948",
"1949",
):
return "1940-1949"
elif anno in (
"1930",
"1931",
"1932",
"1933",
"1934",
"1935",
"1936",
"1937",
"1938",
"1939",
):
return "1930-1939"
elif anno in (
"1920",
"1921",
"1922",
"1923",
"1924",
"1925",
"1926",
"1927",
"1928",
"1929",
):
return "1920-1929"
elif anno in (
"1900",
"1901",
"1902",
"1903",
"1904",
"1905",
"1906",
"1907",
"1908",
"1909",
"1910",
"1911",
"1912",
"1913",
"1914",
"1915",
"1916",
"1917",
"1918",
"1919",
):
return "1900-1919"
elif anno in ("1847", "1865", "1880", "1883", "1886"):
return "1840-1899"
else:
return "other"
# from sub-category to books' category
def rename(row):
if row["physical_format"] == "Brossura":
return "Brossura"
if row["physical_format"] == "Rilegato":
return "Copertina Rigida"
if row["physical_format"] == "Libro":
return "Tascabile"
if row["physical_format"] == "hardcover":
return "Copertina Rigida"
if row["physical_format"] == "Illustrato":
return "Copertina Rigida"
if row["physical_format"] == "Cartonato":
return "Copertina Rigida"
if row["physical_format"] == "paperback":
return "Tascabile"
if row["physical_format"] == "Paperback / softback":
return "Tascabile"
if row["physical_format"] == "[electronic resource]":
return "Ebook"
if row["physical_format"] == "Libro + altro":
return "Tascabile"
if row["physical_format"] == "Hardback":
return "Copertina Rigida"
if row["physical_format"] == "unknown binding":
return "Altro"
if row["physical_format"] == "Libro + CD-Rom":
return "Tascabile"
if row["physical_format"] == "board book":
return "Copertina Rigida"
if row["physical_format"] == "pamphlet":
return "Tascabile"
if row["physical_format"] == "Paperback":
return "Tascabile"
if row["physical_format"] == "calendar":
return "Spiralato"
if row["physical_format"] == "Tascabile":
return "Tascabile"
if row["physical_format"] == "map":
return "Tascabile"
if row["physical_format"] == "spiral-bound":
return "Spiralato"
if row["physical_format"] == "mass market paperback":
return "Tascabile"
if row["physical_format"] == "library binding":
return "Copertina Rigida"
if row["physical_format"] == "pop-up":
return "pop-up"
if row["physical_format"] == "turtleback":
return "Copertina Rigida"
if row["physical_format"] == "cards":
return "Tascabile"
return "Other"
# from key words to books' subjects
def assignment_cat(row):
subject_old = row["subjects"].lower().strip().split(" ")
lista_viaggi = [
"viaggi",
"travel",
"turismo",
"holiday",
"places",
"place",
"guide",
"guidebooks",
"cartine",
"guides",
"foreign",
"museum",
"turistiche",
"world",
]
lista_arte = [
"art",
"arte",
"arts",
"buildings",
"pittura",
"photography",
"exhibitions",
"landscape",
"ceramics",
"music",
"urban",
"catalogs",
"museo",
"scultura",
"moda",
"symphony",
"design",
"fashion",
"architettura",
"beni",
"culturali",
"individual",
"architects",
"photographs",
"photographers",
"fotografia",
"cinema",
"musica",
"artists",
"viviani",
]
lista_sport = ["sport", "sports"]
lista_storia_filosofia = ["storia", "history", "filosofia"]
lista_biografie = ["biografie", "biographies", "biography"]
lista_istruzione = [
"english",
"grammatica",
"dizionari",
"vocabulary",
"translating",
"manual",
"manuals",
"lingue",
"languages",
"università",
"study",
"scuola",
"psycholinguistics",
]
lista_attualità_politica_economia = [
"società",
"politics",
"rights",
"philosophy",
"immigration",
"emigration",
"business",
"economia",
"finanza",
"management",
"marketing",
"politica",
"diritto",
"lavoro",
"econometrics",
]
    lista_bambini_ragazzi = [
"bambini",
"ragazzi",
"children",
"childrens",
"fumetti",
"babypreschool",
]
lista_narrativa = [
"fantasy",
"family",
"fiction",
"romance",
"mistery",
"crime",
"horror",
"gothic",
"readers",
"narrativa",
"gialli",
"noir",
"avventura",
"passione",
"sentimenti",
]
lista_letteratura = [
"letteratura",
"criticism",
"literature",
"drama",
"letterature",
"poetry",
"romanzi",
"tolstoy",
"bronte",
"austen",
"defoe",
"dickens",
]
lista_scienza = [
"scienza",
"ambiente",
"animali",
"geology",
"tecnologia",
"technology",
"science",
"physics",
"nature",
"informatica",
"web",
"machine",
"learning",
"computer",
"combustion",
"engine",
]
lista_religione = [
"religione",
"spiritualità",
"religion",
"gnosticism",
"mind",
"spirit",
"christian",
"bible",
"church",
]
lista_gastronomia_cucina = [
"gastronomia",
"cucina",
"cook",
"wine",
"salute",
"benessere",
"cookery",
]
lista_hobby = ["hobby", "tempo"]
lista_categorie = [
lista_viaggi,
lista_arte,
lista_sport,
lista_storia_filosofia,
lista_biografie,
lista_istruzione,
lista_attualità_politica_economia,
        lista_bambini_ragazzi,
lista_narrativa,
lista_letteratura,
lista_scienza,
lista_religione,
lista_gastronomia_cucina,
lista_hobby,
]
nome_categorie = [
"viaggi",
"arte",
"sport",
"storia e filosofia",
"biografie",
"istruzione",
"attualità,politica ed economia",
"bambini e ragazzi",
"narrativa",
"letteratura",
"scienza e tecnologia",
"religione",
"gastronomia e cucina",
"hobby e tempo libero",
]
dizionario = zip(nome_categorie, lista_categorie)
max_intersection = 0
categoria_risultante = ""
for nome, lista_parole in dizionario:
intersection = len(list(set(lista_parole) & set(subject_old)))
if intersection > max_intersection:
max_intersection = intersection
categoria_risultante = nome
return categoria_risultante
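# Illustrative example (not called anywhere in this script): the category whose keyword
# list has the largest overlap with the subjects field wins. A book with subjects
# "History of Art and Photography" matches two words from lista_arte ('art', 'photography')
# and one from lista_storia_filosofia ('history'), so
# assignment_cat({"subjects": "History of Art and Photography"}) returns "arte".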
def analysis_data(data_set_path, corrected_year_path, corrected_category_path):
# enriched dataset
data = pd.read_csv(data_set_path)
# dataset with fixed year
con_mille = pd.read_excel(corrected_year_path)
# dataset with fixed subjects
categorie = pd.read_excel(corrected_category_path)
# not useful columns
data2 = data.drop(
labels=[
"level_0",
"index",
"isbn_13",
"language",
"description",
"mondadori_url",
"hoepli_url",
"languages",
],
axis=1,
)
# drop all rows with at least a null
data2 = data2.dropna()
# from string to categorical column
data2["physical_format"] = pd.Categorical(data2["physical_format"])
# drop unuseful books
data2 = data2[
(data2.physical_format != "Game")
& (data2.physical_format != "audio cassette")
& (data2.physical_format != "Audio CD")
& (data2.physical_format != "Long Playing Mix")
& (data2.physical_format != "audio cd")
& (data2.physical_format != "cd-rom")
& (data2.physical_format != "CD-ROM")
& (data2.physical_format != "unknown binding")
]
    # normalize raw physical formats (sub-categories) to the main format categories
data2["physical_format"] = data2.apply(lambda row: rename(row), axis=1)
# assign subject to each book
data2["Categoria"] = data2.apply(lambda row: assignment_cat(row), axis=1)
b = data2[data2["Categoria"] == ""]
c = data2[data2["Categoria"] != ""]
    # concatenate the datasets whose subjects were assigned automatically (c) and manually (categorie)
finale = pd.concat([c, categorie])
# split date
a = finale["publish_date"].str.rsplit("/", n=1, expand=True)
    # Take the columns from the split above and, with a loop, assign to the None values
    # of the second column the corresponding year from the first column.
d = a[0].tolist()
e = a[1].tolist()
conta = 0
for item in e:
if item is None:
e[conta] = d[conta]
conta += 1
else:
conta += 1
    # Add the year column using the list built above.
finale["anno"] = e
    # rows with year "1000" were corrected manually and are reloaded from corrected_year_path
senza_mille = finale[finale["anno"] != "1000"]
senza_mille.drop(["publish_date", "subjects"], axis=1, inplace=True)
mille = finale[finale["anno"] == "1000"]
finale = pd.concat([senza_mille, con_mille])
finale.drop(["publish_date", "subjects"], axis=1, inplace=True)
finale["anno"] = finale["anno"].astype("str")
# year to decade
finale["anno"] = finale.apply(lambda row: etichetta(row), axis=1)
    # manually fix a few incorrect prices
finale["price"].iloc[479] = 1000.00
finale["price"].iloc[974] = 1000.00
finale["price"].iloc[1467] = 5000.00
# violin plot: number_of_pages~physical_format
sns.set(style="whitegrid")
ax = sns.violinplot(
data=finale,
x="physical_format",
y="number_of_pages",
palette="Set2",
split=True,
)
ax.set_xlabel("")
sns.set(rc={"figure.figsize": (15, 15)})
# violin plot: price~physical_format
finale["price"] = pd.to_numeric(finale["price"], downcast="float")
sns.set(style="whitegrid")
ax1 = sns.violinplot(
data=finale, x="physical_format", y="price", palette="Set2", split=True
)
ax1.set_xlabel("")
    sns.set(rc={"figure.figsize": (15, 15)})
    """### OUTLIER ANALYSIS AND REMOVAL:
    1. Create 5 sub-datasets, one for each book physical format
    2. For each dataset add z-score columns computed from number_of_pages and price; rows with |z-score| > 3 are treated as outliers
    3. Concatenate the cleaned datasets
    """
# 1. create subsets
brossura = finale[finale["physical_format"] == "Brossura"]
cop_rig = finale[finale["physical_format"] == "Copertina Rigida"]
tascabile = finale[finale["physical_format"] == "Tascabile"]
spiralato = finale[finale["physical_format"] == "Spiralato"]
ebook = finale[finale["physical_format"] == "Ebook"]
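    # The per-format z-score filtering below (steps 2-3) can also be written as one
    # reusable helper; this is an illustrative sketch only and is kept commented out:
    # def drop_outliers(df):
    #     pages_ok = np.abs(stats.zscore(df["number_of_pages"])) < 3
    #     price_ok = np.abs(stats.zscore(df["price"])) < 3
    #     return df[pages_ok & price_ok]
    # e.g. brossura_fin = drop_outliers(brossura)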
# 2. compute z-scores
# BROSSURA
brossura["z_score_pagine"] = stats.zscore(brossura["number_of_pages"])
brossura["z_score_prezzo"] = stats.zscore(brossura["price"])
brossura_meno_pagine = brossura[brossura["z_score_pagine"].abs() < 3]
brossura_fin = brossura_meno_pagine[
brossura_meno_pagine["z_score_prezzo"].abs() < 3
]
# COPERTINA_RIGIDA
cop_rig["z_score_pagine"] = stats.zscore(cop_rig["number_of_pages"])
cop_rig["z_score_prezzo"] = stats.zscore(cop_rig["price"])
cop_rig_meno_pagine = cop_rig[cop_rig["z_score_pagine"].abs() < 3]
cop_rig_fin = cop_rig_meno_pagine[cop_rig_meno_pagine["z_score_prezzo"].abs() < 3]
# TASCABILE
tascabile["z_score_pagine"] = stats.zscore(tascabile["number_of_pages"])
tascabile["z_score_prezzo"] = stats.zscore(tascabile["price"])
tascabile_meno_pagine = tascabile[tascabile["z_score_pagine"].abs() < 3]
tascabile_fin = tascabile_meno_pagine[
tascabile_meno_pagine["z_score_prezzo"].abs() < 3
]
# SPIRALATO
spiralato["z_score_pagine"] = stats.zscore(spiralato["number_of_pages"])
spiralato["z_score_prezzo"] = stats.zscore(spiralato["price"])
spiralato_meno_pagine = spiralato[spiralato["z_score_pagine"].abs() < 3]
spiralato_fin = spiralato_meno_pagine[
spiralato_meno_pagine["z_score_prezzo"].abs() < 3
]
# EBOOK
ebook["z_score_pagine"] = stats.zscore(ebook["number_of_pages"])
ebook["z_score_prezzo"] = stats.zscore(ebook["price"])
ebook_meno_pagine = ebook[ebook["z_score_pagine"].abs() < 3]
ebook_fin = ebook_meno_pagine[ebook_meno_pagine["z_score_prezzo"].abs() < 3]
# concatenate cleaned datasets
finale_meno_outliers = pd.concat(
[brossura_fin, cop_rig_fin, tascabile_fin, spiralato_fin, ebook_fin]
).reset_index()
finale_meno_outliers
# violin plot: number_of_pages~physical_format
sns.set(style="whitegrid")
ax = sns.violinplot(
data=finale_meno_outliers,
x="physical_format",
y="number_of_pages",
palette="Set2",
)
ax.set_xlabel("")
sns.set(rc={"figure.figsize": (15, 15)})
# violin plot: price~physical_format
sns.set(style="whitegrid")
ax1 = sns.violinplot(
data=finale_meno_outliers, x="physical_format", y="price", palette="Set2"
)
ax1.set_xlabel("")
sns.set(rc={"figure.figsize": (15, 15)})
""" LINEAR MODEL """
# create dummy variables from 'formato'
finale_meno_outliers["number_of_pages"] = finale_meno_outliers[
"number_of_pages"
].astype("float64")
finale_meno_outliers.physical_format = pd.Categorical(
finale_meno_outliers.physical_format
)
finale_meno_outliers["formato"] = finale_meno_outliers.physical_format.cat.codes
# features
x = finale_meno_outliers[["number_of_pages", "formato"]].to_numpy()
# label
y = finale_meno_outliers["price"].astype("float64").to_numpy()
# performance analysis of OLS model
model = sm.OLS(y, x).fit()
np.set_printoptions(suppress=True)
print_model = model.summary()
    print(print_model)
    """ ANALYSIS OF OUTLIERS AND INFLUENTIAL POINTS"""
    fitted_values = pd.Series(model.fittedvalues, name="Fitted Values")
"""
PREPARE
Before running train, you need to run prepare.py with the respective task.
Example (in the command line):
> cd to root dir
> conda activate nlp
> python src/prepare.py --do_format --task 1
"""
#NOTE: the following is a workaround for AML to load modules
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import os
import spacy
import pandas as pd
import numpy as np
import string
import re
import argparse
from sklearn.model_selection import StratifiedShuffleSplit
# Custom functions
import sys
sys.path.append('./src')
import helper as he
import data as dt
import custom as cu
logger = he.get_logger(location=__name__)
class Clean():
"""Text preprocessing and cleaning steps
SUPPORTED LANGUAGES
- EN
- DE
- IT
- ES
- FR
- XX (multi - NER only)
SUPPORTED MODULES
- Remove Noise
Remove formatting and other noise that may be contained in emails or
other document types.
- Get Placeholders
Placeholders for common items such as dates, times, urls but also
custom customer IDs.
- Remove Stopwords
Stopwords can be added by adding a language specific stopword file
to /assets. Format: "assets/stopwords_<language>.txt".
- Lemmatize
"""
def __init__(self, task,
download_source=False,
download_train=False,
inference=False):
self.task = task
self.language = cu.params.get('language')
# Load data class
self.dt = dt.Data(task=self.task, inference=inference)
# Download data, if needed
if download_train:
self.dt.download('data_dir', dir = 'data_dir', source = 'datastore')
# Load spacy model
self.nlp = he.load_spacy_model(language=self.language, disable=['ner','parser','tagger'])
# Create stopword list
stopwords_active = []
## Load names
try:
names = self.dt.load('fn_names', dir = 'asset_dir', file_type = 'list')
stopwords_active = stopwords_active + names
except FileNotFoundError as e:
logger.warning(f'[WARNING] No names list loaded: {e}')
## Load stopwords
try:
stopwords = self.dt.load('fn_stopwords', dir = 'asset_dir', file_type = 'list')
stopwords_active = stopwords_active + stopwords
except FileNotFoundError as e:
logger.warning(f'[WARNING] No stopwords list loaded: {e}')
## Add to Spacy stopword list
        logger.warning(f'[INFO] Active stopwords list length: {len(stopwords_active)}')
for w in stopwords_active:
self.nlp.vocab[w.replace('\n','')].is_stop = True
def remove(self, line,
rm_email_formatting=False,
rm_email_header=False,
rm_email_footer=False,
rm_punctuation=False):
"""Remove content from text"""
if not isinstance(line, str):
line = str(line)
# Customer Remove
line = cu.remove(line)
if rm_email_formatting:
line = re.sub(r'<[^>]+>', ' ', line) # Remove HTML tags
line = re.sub(r'^(.*\.eml)', ' ', line) # remove header for system generated emails
if rm_email_header:
#DE/EN
if self.language == 'en' or self.language == 'de':
line = re.sub(r'\b(AW|RE|VON|WG|FWD|FW)(\:| )', '', line, flags=re.I)
#DE
if self.language == 'de':
line = re.sub(r'(Sehr geehrte( Damen und Herren.)?.)|hallo.|guten( tag)?.', '', line, flags=re.I)
if rm_email_footer:
#EN
if self.language == 'en':
line = re.sub(r'\bkind regards.*', '', line, flags=re.I)
#DE
if self.language == 'de':
line = re.sub(r'\b(mit )?(beste|viele|liebe|freundlich\w+)? (gr[u,ü][ß,ss].*)', '', line, flags=re.I)
line = re.sub(r'\b(besten|herzlichen|lieben) dank.*', '', line, flags=re.I)
line = re.sub(r'\bvielen dank für ihr verständnis.*', '', line, flags=re.I)
line = re.sub(r'\bvielen dank im voraus.*', '', line, flags=re.I)
line = re.sub(r'\b(mfg|m\.f\.g) .*','', line, flags=re.I)
line = re.sub(r'\b(lg) .*','',line, flags=re.I)
line = re.sub(r'\b(meinem iPhone gesendet) .*','',line, flags=re.I)
line = re.sub(r'\b(Gesendet mit der (WEB|GMX)) .*','',line, flags=re.I)
line = re.sub(r'\b(Diese E-Mail wurde von Avast) .*','',line, flags=re.I)
# Remove remaining characters
##NOTE: may break other regex
if rm_punctuation:
line = re.sub('['+string.punctuation+']',' ',line)
return line
def get_placeholder(self, line,
                        rp_generic=False,
                        rp_custom=False,
                        rp_num=False):
        """Replace text with type-specific placeholders"""
# Customer placeholders
line = cu.get_placeholder(line)
# Generic placeholder
if rp_generic:
line = re.sub(r' \+[0-9]+', ' ', line) # remove phone numbers
            line = re.sub(r'0x([a-z]|[0-9])+ ', ' PER ', line, flags=re.IGNORECASE)  # replace hex-like ids with placeholder
line = re.sub(r'[0-9]{2}[\/.,:][0-9]{2}[\/.,:][0-9]{2,4}', ' PDT ', line) # remove dates and time, replace with placeholder
line = re.sub(r'([0-9]{2,3}[\.]){3}[0-9]{1,3}',' PIP ',line) # replace ip with placeholder
line = re.sub(r'[0-9]{1,2}[\/.,:][0-9]{1,2}', ' PTI ', line) # remove only time, replace with placeholder
line = re.sub(r'[\w\.-]+@[\w\.-]+', ' PEM ', line) # remove emails
line = re.sub(r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', ' PUR ', line) # Remove links
line = re.sub(r'€|\$|(USD)|(EURO)', ' PMO ', line)
# Placeholders for numerics
if rp_num:
line = re.sub(r' ([0-9]{4,30}) ',' PNL ', line) # placeholder for long stand alone numbers
line = re.sub(r' [0-9]{2,3} ',' PNS ', line) # placeholder for short stand alone numbers
return line
def tokenize(self, line, lemmatize = False, rm_stopwords = False):
"""Tokenizer for non DL tasks"""
if not isinstance(line, str):
line = str(line)
if lemmatize and rm_stopwords:
line = ' '.join([t.lemma_ for t in self.nlp(line) if not t.is_stop])
elif lemmatize:
line = ' '.join([t.lemma_ for t in self.nlp(line)])
elif rm_stopwords:
line = ' '.join([t.text for t in self.nlp(line) if not t.is_stop])
return line
def transform(self, texts,
to_lower = False,
# Remove
rm_email_formatting = False,
rm_email_header = False,
rm_email_footer = False,
rm_punctuation = False,
# Placeholders
rp_generic = False,
rp_num = False,
# Tokenize
lemmatize = False,
rm_stopwords = False,
return_token = False,
# Whitespace
remove_whitespace = True
):
"""Main run function for cleaning process"""
if isinstance(texts, str):
texts = [texts]
# Convert to series for improved efficiency
df_texts = pd.Series(texts)
# Avoid loading errors
df_texts = df_texts.replace('\t', ' ', regex=True)
# Remove noise
if any((rm_email_formatting, rm_email_header,
rm_email_footer, rm_punctuation)):
df_texts = df_texts.apply(lambda x: self.remove(x,
rm_email_formatting = rm_email_formatting,
rm_email_header = rm_email_header,
rm_email_footer = rm_email_footer,
rm_punctuation = rm_punctuation))
# Replace placeholders
if any((rp_generic, rp_num)):
df_texts = df_texts.apply(lambda x: self.get_placeholder(x,
rp_generic = rp_generic,
rp_num = rp_num))
# Tokenize text
if any((lemmatize, rm_stopwords, return_token)):
df_texts = df_texts.apply(self.tokenize,
lemmatize = lemmatize,
rm_stopwords = rm_stopwords)
# To lower
if to_lower:
df_texts = df_texts.apply(str.lower)
# Remove spacing
if remove_whitespace:
df_texts = df_texts.apply(lambda x: " ".join(x.split()))
# Return Tokens
if return_token:
return [t.split(' ') for t in df_texts.to_list()]
else:
return df_texts.to_list()
def transform_by_task(self, text):
# CUSTOM FUNCTION
if cu.tasks.get(str(self.task)).get('type') == 'classification':
return self.transform(text,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rp_generic = True)[0]
elif cu.tasks.get(str(self.task)).get('type') == 'multi_classification':
return self.transform(text,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rp_generic = True)[0]
elif cu.tasks.get(str(self.task)).get('type') == 'ner':
return text[0]
elif cu.tasks.get(str(self.task)).get('type') == 'qa':
return self.transform(text,
to_lower = True,
# Remove
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rm_punctuation = True,
# Placeholders
rp_generic = True,
rp_num = True,
# Tokenize
lemmatize = True,
rm_stopwords = True,
return_token = True
)[0]
else:
logger.warning('[WARNING] No transform by task found.')
return text[0]
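# Example usage of Clean (illustrative sketch; assumes the task ids and the asset/config
# files referenced by custom.py and data.py are available in the project):
#   cl = Clean(task=1)
#   cl.transform(["RE: Hello <b>team</b>, see https://example.com"],
#                rm_email_formatting=True, rm_email_header=True, rp_generic=True)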
def prepare_classification(task, do_format, train_split, min_cat_occurance,
min_char_length, register_data):
# Get clean object
cl = Clean(task=task, download_source=True)
# Load data
if not os.path.isfile(cl.dt.get_path('fn_prep', dir = 'data_dir')) or do_format:
data = dt.get_dataset(cl, source="cdb")
else:
data = cl.dt.load('fn_prep', dir = 'data_dir')
logger.warning(f'Data Length : {len(data)}')
# Load text & label field
text_raw = cu.load_text(data)
data['label'] = cu.load_label(data, task)
if cu.tasks.get(str(task)).get('type') == 'multi_classification':
data['label'] = data['label'].str.replace(', ', '_').str.replace(' ', '_')
flat_labels = [row['label'].split(',') for index, row in data.iterrows()]
labels_clean = []
for labels in flat_labels:
for label in labels:
labels_clean.append(label)
label_list_raw = pd.DataFrame({'label':labels_clean})
label_list_raw = label_list_raw[label_list_raw.label != '']
label_list_raw = label_list_raw.label.drop_duplicates()
elif cu.tasks.get(str(task)).get('type') == 'classification': # in case of single label classification
label_list_raw = data.label.drop_duplicates()
# Clean text
data['text'] = cl.transform(text_raw,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rp_generic = True)
# Filter by length
data = he.remove_short(data, 'text', min_char_length=min_char_length)
logger.warning(f'Data Length : {len(data)}')
# Remove duplicates
data_red = data.drop_duplicates(subset=['text'])
logger.warning(f'Data Length : {len(data_red)}')
# Min class occurance
if cu.tasks.get(str(task)).get('type') == 'classification':
data_red = data_red[data_red.groupby('label').label.transform('size') > min_cat_occurance]
elif cu.tasks.get(str(task)).get('type') == 'multi_classification':
# Split rows
data_transform = data_red[['id', 'label']].copy()
data_transform['label'] = [row['label'].split(",") for index, row in data_transform.iterrows()] # pipe it to list
data_transform = pd.DataFrame({'index':data_transform.index.repeat(data_transform.label.str.len()), 'label':np.concatenate(data_transform.label.values)}) # explode df
data_transform = data_transform[data_transform.groupby('label').label.transform('size') > min_cat_occurance] # count for min occurance and only keep relevant ones
data_transform = data_transform.groupby(['index'])['label'].apply(lambda x: ','.join(x.astype(str))).reset_index() # re-merge
data_transform = data_transform.set_index('index')
del data_red['label']
        data_red = pd.concat([data_red, data_transform], join='inner', axis=1)
import sys
import logging
import json
import numpy as np
import pandas as pd
import calliope
logger = logging.getLogger(name=__package__) # Logger with name 'psm', can be customised elsewhere
def get_logger(name: str, run_config: dict) -> logging.Logger: # pragma: no cover
"""Get a logger to use throughout the codebase.
Parameters:
-----------
name: logger name
config: dictionary with model, run and save properties
"""
output_save_dir = run_config['output_save_dir']
# Create the master logger and formatter
logger = logging.getLogger(name=name)
logger.setLevel(logging.DEBUG) # Set master to lowest level -- gets overwritten by handlers
formatter = logging.Formatter(
fmt='%(asctime)s - %(levelname)-8s - %(name)s - %(filename)s - %(message)s',
datefmt='%Y-%m-%d,%H:%M:%S'
)
# Create two handlers: one writes to a log file, the other to stdout
logger_file = logging.FileHandler(f'{output_save_dir}/model_run.log')
logger_file.setFormatter(fmt=formatter)
logger_file.setLevel(level=getattr(logging, run_config['log_level_file']))
logger.addHandler(hdlr=logger_file)
logger_stdout = logging.StreamHandler(sys.stdout)
logger_stdout.setFormatter(fmt=formatter)
logger_stdout.setLevel(level=getattr(logging, run_config['log_level_stdout']))
logger.addHandler(hdlr=logger_stdout)
return logger
def load_time_series_data(
model_name: str, path: str = 'data/demand_wind_solar.csv'
) -> pd.DataFrame: # pragma: no cover
"""Load demand, wind and solar time series data for model.
Parameters:
-----------
model_name: '1_region' or '6_region'
path: path to CSV file
"""
ts_data = pd.read_csv(path, index_col=0)
ts_data = ts_data.clip(lower=0.) # Trim negative values, can come from floating point error
ts_data.index = pd.to_datetime(ts_data.index)
# If 1_region model, take demand, wind and solar from region 5
if model_name == '1_region':
ts_data = ts_data.loc[:, ['demand_region5', 'wind_region5', 'solar_region5']]
ts_data.columns = ['demand', 'wind', 'solar']
logger.debug(f'Loaded raw time series data:\n\n{ts_data}\n')
return ts_data
def has_missing_leap_days(ts_data: pd.DataFrame) -> bool:
"""Detect if a time series has missing leap days.
Parameters:
-----------
ts_data : time series to check for missing leap days
"""
index = ts_data.index
feb28_index = ts_data.index[(index.year % 4 == 0) & (index.month == 2) & (index.day == 28)]
feb29_index = ts_data.index[(index.year % 4 == 0) & (index.month == 2) & (index.day == 29)]
mar01_index = ts_data.index[(index.year % 4 == 0) & (index.month == 3) & (index.day == 1)]
if len(feb29_index) < min((len(feb28_index), len(mar01_index))):
return True
return False
def get_scenario(
run_mode: str, baseload_integer: bool, baseload_ramping: bool, allow_unmet: bool
) -> str:
"""Get scenario name, a comma-separated list of overrides in `model.yaml` for Calliope model
Parameters:
-----------
run_mode: 'plan' or 'operate'
baseload_integer: activate baseload discrete capacity constraint
baseload_ramping: enforce baseload ramping constraint
allow_unmet: allow unmet demand in 'plan' mode (should always be allowed in 'operate' mode)
"""
scenario = run_mode
if run_mode == 'plan':
if baseload_integer:
scenario += ',integer'
else:
scenario += ',continuous'
if allow_unmet:
scenario += ',allow_unmet'
if baseload_ramping:
scenario += ',ramping'
logger.debug(f'Created Calliope model scenario: `{scenario}`.')
return scenario
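# Example: get_scenario('plan', baseload_integer=False, baseload_ramping=True, allow_unmet=True)
# returns 'plan,continuous,allow_unmet,ramping'.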
def get_cap_override_dict(model_name: str, fixed_caps: dict) -> dict:
"""Create override dictionary used to set fixed fixed capacities in Calliope model.
Parameters:
-----------
model_name: '1_region' or '6_region'
fixed_caps: fixed capacities -- `model.get_summary_outputs(as_dict=True)` has correct format
Returns:
--------
o_dict: Dict that can be fed as 'override_dict' into Calliope model in 'operate' mode
"""
if not isinstance(fixed_caps, dict):
raise ValueError('Incorrect input format for fixed_caps')
o_dict = {} # Populate this override dict
# Add generation capacities capacities for 1_region model
if model_name == '1_region':
for tech, attribute in [
('baseload', 'energy_cap_equals'),
('peaking', 'energy_cap_equals'),
('wind', 'resource_area_equals'),
('solar', 'resource_area_equals')
]:
# If this technology is specified, add it to o_dict
fixed_caps_key = f'cap_{tech}_total'
if fixed_caps_key in fixed_caps:
idx = f'locations.region1.techs.{tech}.constraints.{attribute}'
o_dict[idx] = fixed_caps[fixed_caps_key]
# Add generation and transmission capacities for 6_region model
elif model_name == '6_region':
for region in [f'region{i+1}' for i in range(6)]:
# Add generation capacities
for tech, attribute in [
('baseload', 'energy_cap_equals'),
('peaking', 'energy_cap_equals'),
('wind', 'resource_area_equals'),
('solar', 'resource_area_equals')
]:
# If this technology is specified in this region, add it to o_dict
fixed_caps_key = f'cap_{tech}_{region}'
if fixed_caps_key in fixed_caps:
idx = f'locations.{region}.techs.{tech}_{region}.constraints.{attribute}'
o_dict[idx] = fixed_caps[fixed_caps_key]
# Add transmission capacities
for region_to in [f'region{i+1}' for i in range(6)]:
# If this technology is specified in this region, add it to o_dict
fixed_caps_key = f'cap_transmission_{region}_{region_to}'
if fixed_caps_key in fixed_caps:
idx_regions = f'{region},{region_to}.techs.transmission_{region}_{region_to}'
idx = f'links.{idx_regions}.constraints.energy_cap_equals'
o_dict[idx] = fixed_caps[fixed_caps_key]
if len(o_dict) == 0:
raise AttributeError('Override dict is empty. Check if something has gone wrong.')
logger.debug(f'Created override dict:\n{json.dumps(o_dict, indent=4)}')
return o_dict
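# Example (hypothetical capacity values): for the '1_region' model a fixed_caps dict such as
#   {'cap_baseload_total': 10.0, 'cap_wind_total': 25.0}
# is mapped to the override entries
#   {'locations.region1.techs.baseload.constraints.energy_cap_equals': 10.0,
#    'locations.region1.techs.wind.constraints.resource_area_equals': 25.0}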
def _get_technology_info(model: calliope.Model) -> pd.DataFrame:
"""Get technology install & generation costs and emissions from model config."""
model_dict = model._model_run
costs = pd.DataFrame(columns=['install', 'generation', 'emissions'], dtype='float')
regions = list(model_dict['locations'].keys())
# Add the technologies in each region
for region in regions:
region_dict = model_dict['locations'][region]
# Add generation technologies
techs = [i for i in region_dict['techs'].keys() if 'demand' not in i]
for tech in techs:
tech_costs_dict = region_dict['techs'][tech]['costs']
is_variable_renewable = ('wind' in tech) or ('solar' in tech)
install_cost_name = 'resource_area' if is_variable_renewable else 'energy_cap'
costs.loc[tech, 'install'] = (
0. if 'unmet' in tech else float(tech_costs_dict['monetary'][install_cost_name])
)
costs.loc[tech, 'generation'] = float(tech_costs_dict['monetary']['om_prod'])
costs.loc[tech, 'emissions'] = float(tech_costs_dict['emissions']['om_prod'])
# Add transmission technologies
regions_to = region_dict.get('links', [])
for region_to in regions_to:
tech = f'transmission_{region}_{region_to}'
tech_reversed = f'transmission_{region_to}_{region}'
if tech_reversed in costs.index:
continue # Only count links in one direction
tech_costs_dict = region_dict['links'][region_to]['techs'][tech]['costs']
costs.loc[tech, 'install'] = float(tech_costs_dict['monetary']['energy_cap'])
costs.loc[tech, 'generation'] = 0.
costs.loc[tech, 'emissions'] = 0.
logger.debug(f'Costs read from model config:\n\n{costs}\n')
return costs
def _has_consistent_outputs_1_region(model: calliope.Model) -> bool: # pragma: no cover
"""Check if model outputs (costs, generation levels, emissions) are internally consistent.
Log errors whenever they are not.
Parameters:
-----------
model: instance of OneRegionModel
"""
passing = True # Changes to False if any outputs are found to be inconsistent
cost_total_v1 = 0
costs = _get_technology_info(model=model)
techs = list(costs.index)
sum_out = model.get_summary_outputs()
ts_out = model.get_timeseries_outputs()
res = model.results
# Normalise install costs to same temporal scale as generation costs
corrfac = model.num_timesteps / 8760
# Test if generation technology installation costs are consistent
if model.run_mode == 'plan':
for tech in techs:
if tech == 'unmet':
continue # Unmet demand doesn't have meaningful install cost
cost_v1 = corrfac * float(costs.loc[tech, 'install'] * sum_out.loc[f'cap_{tech}_total'])
cost_v2 = float(res.cost_investment.loc['monetary', f'region1::{tech}'])
if not np.isclose(cost_v1, cost_v2, rtol=0., atol=0.1):
logger.error(
f'Cannot recreate {tech} install costs -- manual: {cost_v1}, model: {cost_v2}.'
)
passing = False
cost_total_v1 += cost_v1
# Test if generation costs are consistent
for tech in techs:
cost_v1 = float(costs.loc[tech, 'generation'] * sum_out.loc[f'gen_{tech}_total'])
cost_v2 = float(res.cost_var.loc['monetary', f'region1::{tech}'].sum())
if not np.isclose(cost_v1, cost_v2, rtol=0., atol=0.1):
logger.error(
f'Cannot recreate {tech} generation costs -- manual: {cost_v1}, model: {cost_v2}.'
)
passing = False
cost_total_v1 += cost_v1
# Test if total costs are consistent
if model.run_mode == 'plan':
cost_total_v2 = float(res.cost.loc['monetary'].sum())
if not np.isclose(cost_total_v1, cost_total_v2, rtol=0., atol=0.1):
logger.error(
f'Cannot recreate system cost -- manual: {cost_total_v1}, model: {cost_total_v2}.'
)
passing = False
# Test if emissions are consistent
for tech in techs:
emission_v1 = float(costs.loc[tech, 'emissions'] * sum_out.loc[f'gen_{tech}_total'])
emission_v2 = float(res.cost_var.loc['emissions', f'region1::{tech}'].sum())
        if not np.isclose(emission_v1, emission_v2, rtol=0., atol=0.1):
            logger.error(
                f'Cannot recreate {tech} emissions -- manual: {emission_v1}, model: {emission_v2}.'
            )
            passing = False
# Test if supply matches demand
generation_total = float(sum_out.filter(regex='gen_.*_total', axis=0).sum())
demand_total = float(sum_out.loc['demand_total'])
if not np.isclose(generation_total, demand_total, rtol=0., atol=0.1):
logger.error(
f'Supply-demand mismatch -- generation: {generation_total}, demand: {demand_total}.'
)
passing = False
# Test that generation levels are all nonnegative and add up to the demand
if not (ts_out.filter(like='gen', axis=1) >= 0).all().all():
logger.error(f'Some generation/demand levels are negative:\n\n{ts_out}\n.')
passing = False
if not np.allclose(
ts_out.filter(like='gen', axis=1).sum(axis=1),
ts_out.filter(like='demand', axis=1).sum(axis=1),
rtol=0.,
atol=0.1
):
logger.error(f'Generation does not add up to demand:\n\n{ts_out}\n.')
passing = False
return passing
def _has_consistent_outputs_6_region(model: calliope.Model) -> bool: # pragma: no cover
"""Check if model outputs (costs, generation levels, emissions) are internally consistent.
Log errors whenever they are not.
Parameters:
-----------
model: instance of SixRegionModel
"""
passing = True # Changes to False if any outputs are found to be inconsistent
cost_total_v1 = 0
costs = _get_technology_info(model=model)
sum_out = model.get_summary_outputs()
ts_out = model.get_timeseries_outputs()
res = model.results
# Normalise install costs to same temporal scale as generation costs
corrfac = model.num_timesteps / 8760
# Get list of tech-location pairs
regions = list(model._model_run['locations'].keys())
tech_locations = [i.split('_') for i in costs.index]
generation_tech_locations = [i for i in tech_locations if i[0] != 'transmission']
transmission_tech_locations = [i for i in tech_locations if i[0] == 'transmission']
# Test if generation technology installation costs are consistent
if model.run_mode == 'plan':
for tech, region in generation_tech_locations:
if tech == 'unmet':
continue # Unmet demand doesn't have meaningful install cost
cost_v1 = corrfac * float(
costs.loc[f'{tech}_{region}', 'install'] * sum_out.loc[f'cap_{tech}_{region}']
)
cost_v2 = float(res.cost_investment.loc['monetary', f'{region}::{tech}_{region}'])
if not np.isclose(cost_v1, cost_v2, rtol=0., atol=0.1):
logger.error(
f'Cannot recreate {tech} install costs in {region} -- '
f'manual: {cost_v1}, model: {cost_v2}.'
)
passing = False
cost_total_v1 += cost_v1
# Test if transmission technology installation costs are consistent
if model.run_mode == 'plan':
for tech, region, region_to in transmission_tech_locations:
cost_v1 = corrfac * float(
costs.loc[f'{tech}_{region}_{region_to}', 'install']
* sum_out.loc[f'cap_transmission_{region}_{region_to}']
)
cost_v2 = 2 * float(
res.cost_investment.loc[
'monetary', f'{region}::{tech}_{region}_{region_to}:{region_to}'
]
)
if not np.isclose(cost_v1, cost_v2, rtol=0., atol=0.1):
logger.error(
f'Cannot recreate {tech} install costs from {region} to {region_to} -- '
f'manual: {cost_v1}, model: {cost_v2}.'
)
passing = False
cost_total_v1 += cost_v1
# Test if generation costs are consistent
for tech, region in generation_tech_locations:
cost_v1 = float(
costs.loc[f'{tech}_{region}', 'generation'] * sum_out.loc[f'gen_{tech}_{region}']
)
cost_v2 = float(res.cost_var.loc['monetary', f'{region}::{tech}_{region}'].sum())
if not np.isclose(cost_v1, cost_v2, rtol=0., atol=0.1):
logger.error(
f'Cannot recreate {tech} generation costs in {region} -- '
f'manual: {cost_v1}, model: {cost_v2}.'
)
passing = False
cost_total_v1 += cost_v1
# Test if total costs are consistent
if model.run_mode == 'plan':
cost_total_v2 = float(res.cost.loc['monetary'].sum())
if not np.isclose(cost_total_v1, cost_total_v2, rtol=0., atol=0.1):
logger.error(
f'Cannot recreate system cost -- manual: {cost_total_v1}, model: {cost_total_v2}.'
)
passing = False
# Test if emissions are consistent
for tech, region in generation_tech_locations:
emission_v1 = float(
costs.loc[f'{tech}_{region}', 'emissions'] * sum_out.loc[f'gen_{tech}_{region}']
)
emission_v2 = float(res.cost_var.loc['emissions', f'{region}::{tech}_{region}'].sum())
        if not np.isclose(emission_v1, emission_v2, rtol=0., atol=0.1):
            logger.error(
                f'Cannot recreate {tech} emissions in {region} -- '
                f'manual: {emission_v1}, model: {emission_v2}.'
            )
            passing = False
# Test if supply matches demand
generation_total = float(sum_out.filter(regex='gen_.*_region.*', axis=0).sum())
demand_total = float(sum_out.loc['demand_total'])
if not np.isclose(generation_total, demand_total, rtol=0., atol=0.1):
logger.error(
f'Supply-demand mismatch -- generation: {generation_total}, demand: {demand_total}.'
)
passing = False
# Test that generation levels are all nonnegative and add up to the demand
if not (ts_out.filter(like='gen', axis=1) >= 0).all().all():
logger.error(f'Some generation/demand levels are negative:\n\n{ts_out}\n.')
passing = False
if not np.allclose(
ts_out.filter(like='gen', axis=1).sum(axis=1),
ts_out.filter(like='demand', axis=1).sum(axis=1),
rtol=0.,
atol=0.1
):
logger.error(f'Generation does not add up to demand:\n\n{ts_out}\n.')
passing = False
# Test regional power balance: generation equals demand + transmission out of region
for region in regions:
generation_total_region = ts_out.filter(regex=f'gen_.*_{region}', axis=1).sum(axis=1)
demand_total_region = ts_out.filter(regex=f'demand_{region}', axis=1).sum(axis=1)
transmission_total_from_region = (
ts_out.filter(regex=f'transmission_{region}_region.*', axis=1).sum(axis=1)
- ts_out.filter(regex=f'transmission_region.*_{region}', axis=1).sum(axis=1)
)
if not np.allclose(
generation_total_region,
demand_total_region + transmission_total_from_region,
rtol=0.,
atol=0.1
):
            balance_info = pd.DataFrame()
# The original code was executed on Jupyter notebooks. This is a copy of the scraper and is divided into two sections:
# To make this code work efficiently, copy and paste the two sections into different cells of the Jupyter notebook
## This is an autoscraper which keeps extracting tweets from the Twitter website,
# given a list of search queries generated using 'Twitter_Query_Generator.py' as the input file
# SECTION A - AUTHENTICATION AND LOGIN
# Import required libraries
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from datetime import timedelta
import pandas as pd
import re
# Initiate chrome webdriver
driver = webdriver.Chrome('C:\\Users\\<NAME>\\Downloads\\chromedriver')
driver.maximize_window()
# Enter twitter URL
url = 'https://twitter.com/login'
driver.get(url)
time.sleep(4)
## Create a text file 'credentials.txt' and add twitter username in the first row and password in the second row
# Load username from the first row of credentials.txt
username = driver.find_element_by_xpath('//*[@id="react-root"]/div/div/div[2]/main/div/div/form/div/div[1]/label/div/div[2]/div/input')
username.send_keys([line.rstrip('\n') for line in open('credentials.txt')][0])
# Load password from the second row of credentials.txt
password = driver.find_element_by_xpath('//*[@id="react-root"]/div/div/div[2]/main/div/div/form/div/div[2]/label/div/div[2]/div/input')
password.send_keys([line.rstrip('\n') for line in open('credentials.txt')][1])
# Locate the login button and sign in to twitter
login = driver.find_element_by_xpath('//*[@id="react-root"]/div/div/div[2]/main/div/div/form/div/div[3]/div/div/span/span')
login.click()
time.sleep(3)
# Locate the search box on the home page and prepare for keyword search queries input
first_search = driver.find_element_by_xpath('//*[@id="react-root"]/div/div/div[2]/main/div/div/div/div[2]/div/div[2]/div/div/div/div[1]/div/div/div/form/div[1]/div/div/div[2]/input')
first_search.send_keys('Starting...')
first_search.send_keys(Keys.ENTER)
time.sleep(2)
# Countermeasure for initiating autosaving later on
times_saved = 0
# SECTION B - TWEET LOADING AND SCRAPING USING twitter_query.txt AS INPUT
search_queries = [line.rstrip('\n') for line in open('twitter_query.txt')]
# Use regex to extract tweet features including username, userhandle, post date and tweet text
user_name = re.compile(r'.+?@')
user_handle = re.compile(r'@.+?·')
post_date = re.compile(r'·[A-z][a-z][a-z].+?, [0-9][0-9][0-9][0-9]')
tweet_text = re.compile(r', [0-9][0-9][0-9][0-9].+')
# Empty dictionary to store tweet features
twitter_data = {}
tweet_count = 0
# Iterating over the list of queries generated from Twitter_Query_Generator.py
for search_query in search_queries:
search = driver.find_element_by_xpath('//*[@id="react-root"]/div/div/div[2]/main/div/div/div/div/div/div[1]/div[1]/div/div/div/div/div[2]/div[2]/div/div/div/form/div[1]/div/div/div[2]/input')
search.send_keys(Keys.CONTROL + "a")
search.send_keys(Keys.BACK_SPACE)
search.send_keys(search_query) #yyyy-mm-dd
search.send_keys(Keys.ENTER)
time.sleep(3)
tweet_dup = []
lastHeight = driver.execute_script("return document.body.scrollHeight")
driver.execute_script("window.scrollBy(0, 3)")
time.sleep(2)
while len(tweet_dup)<=1500:
source = driver.page_source
soup = BeautifulSoup(source,'html.parser')
tweet_data = soup.find_all('article',{'aria-haspopup':'false'})
try:
new_tweet_data = set(tweet_data).difference(tweet_dup)
except:
new_tweet_data = tweet_data
tweet_dup = []
for tweet_du in tweet_data:
tweet_dup.append(tweet_du)
#page_break+=1
#time.sleep(1.2)
for new_article in new_tweet_data:
if tweet_count % 10000 == 1:
start_time = time.monotonic()
if tweet_count % 50 == 0:
print(tweet_count)
all_text = new_article.text
try:
user = re.findall(user_name,all_text)[0]
user = user[:-1]
except:
user = 'N/A'
try:
handle = re.findall(user_handle,all_text)[0]
handle = handle[:-1]
replace = re.sub("@","/",handle)
true_handle = 'https://twitter.com'+replace
except:
true_handle = 'N/A'
try:
date = re.findall(post_date,all_text)[0]
except:
date = 'N/A'
try:
tweet_find = re.findall(tweet_text,all_text)[0]
tweet_http = re.sub(', [0-9][0-9][0-9][0-9]','',tweet_find)
tweet = re.sub('https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,}','',tweet_http)
except:
tweet = 'N/A'
#print('User: ', user)
#print('Handle: ', true_handle)
#print('Published: ', date)
#print('Tweet: ', tweet)
#print('\n')
tweet_count+=1
twitter_data[tweet_count] = [user,true_handle,date,tweet]
# Counter check to save files on local disk
write = tweet_count % 10000
            # One of the challenges faced during execution: the browser page gets stuck after loading 4-5k tweets.
            # Solved by automatically refreshing the browser window every 2k tweets.
if tweet_count in [2000,4000,6000,8000]:
driver.refresh()
time.sleep(10)
driver.refresh()
time.sleep(50)
if write == 0: # Autosave scraped data to a csv file once every 10k tweets
times_saved+=1
end_time = time.monotonic()
print(timedelta(seconds=end_time - start_time))
print('\n')
try:
                    twitter_df = pd.DataFrame.from_dict(twitter_data, orient='index', columns=['Username', 'Handle', 'Published', 'Tweet'])
import math
import time
import numpy as np
import pandas as pd
import torch
from tqdm.contrib import tenumerate
from millipede import CountLikelihoodSampler, NormalLikelihoodSampler
from .containers import SimpleSampleContainer, StreamingSampleContainer
from .util import namespace_to_numpy
def populate_alpha_beta_stats(container, stats):
for s in ['h_alpha', 'h_beta', 'h']:
if hasattr(container, s):
stats['Mean ' + s] = getattr(container, s)
def populate_weight_stats(selector, stats, weights, quantiles=[5.0, 10.0, 20.0, 50.0, 90.0, 95.0]):
q5, q10, q20, q50, q90, q95 = np.percentile(weights, quantiles).tolist()
s = "5/10/20/50/90/95: {:.2e} {:.2e} {:.2e} {:.2e} {:.2e} {:.2e}"
stats['Weight quantiles'] = s.format(q5, q10, q20, q50, q90, q95)
s = "mean/std/min/max: {:.2e} {:.2e} {:.2e} {:.2e}"
stats['Weight moments'] = s.format(weights.mean().item(), weights.std().item(),
weights.min().item(), weights.max().item())
T, T_burnin = selector.T, selector.T_burnin
elapsed_time = time.time() - selector.ts[0]
stats['Elapsed MCMC time'] = "{:.1f} seconds".format(elapsed_time)
stats['Mean iteration time'] = "{:.3f} ms".format(1000.0 * elapsed_time / (T + T_burnin))
stats['Number of retained samples'] = T
stats['Number of burn-in samples'] = T_burnin
class BayesianVariableSelector(object):
"""
Base class for all Bayesian variable selection classes.
"""
def run(self, T=2000, T_burnin=1000, verbosity='bar', report_frequency=200, streaming=True, seed=None):
r"""
Run MCMC inference for :math:`T + T_{\rm burn-in}` iterations. After completion the results
of the MCMC run can be accessed in the `summary` and `stats` attributes. Additionally,
if `streaming == False` the `samples` attribute will contain raw samples from the MCMC algorithm.
The `summary` DataFrame contains five columns. The first column lists the Posterior Inclusion
Probability (PIP) for each covariate. The second column lists the posterior mean of the coefficient
that corresponds to each covariate. The third column lists the posterior standard deviation for
each coefficient. The fourth and fifth columns are analogous to the second and third columns,
respectively, with the difference that the fourth and fifth columns report conditional posterior
statistics. For example, the fourth column reports the posterior mean of each coefficient
conditioned on the corresponding covariate being included in the model.
:param int T: Positive integer that controls the number of MCMC samples that are
generated (i.e. after burn-in/adaptation). Defaults to 2000.
:param int T_burnin: Positive integer that controls the number of MCMC samples that are
generated during burn-in/adaptation. Defaults to 1000.
:param str verbosity: Controls the verbosity of the `run` method. If `stdout`, progress is reported via stdout.
If `bar`, then progress is reported via a progress bar. If `None`, then nothing is reported.
Defaults to `bar`.
:param int report_frequency: Controls the frequency with which progress is reported if the `verbosity`
argument is `stdout`. Defaults to 200, i.e. every 200 MCMC iterations.
:param bool streaming: If True, MCMC samples are not stored in memory and summary statistics are computed
online. Otherwise all `T` MCMC samples are stored in memory. Defaults to True. Only disable streaming if
you wish to do something with the samples in the `samples` attribute (and have sufficient memory available).
:param int seed: Random number seed for reproducibility. Defaults to None.
"""
        if not (isinstance(T, int) and T > 0):
            raise ValueError("T must be a positive integer.")
        if not (isinstance(T_burnin, int) and T_burnin > 0):
            raise ValueError("T_burnin must be a positive integer.")
self.T = T
self.T_burnin = T_burnin
if streaming:
self.container = StreamingSampleContainer()
else:
self.container = SimpleSampleContainer()
self.ts = [time.time()]
digits_to_print = str(1 + int(math.log(T + T_burnin + 1, 10)))
if verbosity == 'bar':
enumerate_samples = tenumerate(self.sampler.mcmc_chain(T=T, T_burnin=T_burnin, seed=seed),
total=T + T_burnin)
else:
enumerate_samples = enumerate(self.sampler.mcmc_chain(T=T, T_burnin=T_burnin, seed=seed))
for t, (burned, sample) in enumerate_samples:
self.ts.append(time.time())
if burned:
self.container(namespace_to_numpy(sample))
if verbosity == 'stdout' and (t % report_frequency == 0 or t == T + T_burnin - 1):
s = ("[Iteration {:0" + digits_to_print + "d}]").format(t)
s += "\t# of active features: {}".format(sample.gamma.sum().item())
if t >= report_frequency:
dt = 1000.0 * (self.ts[-1] - self.ts[-1 - report_frequency]) / report_frequency
s += " mean iteration time: {:.2f} ms".format(dt)
print(s)
if not streaming:
self.samples = self.container.samples
self.weights = self.samples.weight
else:
self.weights = np.array(self.container._weights)
class NormalLikelihoodVariableSelector(BayesianVariableSelector):
r"""
Bayesian variable selection for a linear model with a Normal likelihood.
The likelihood variance is controlled by an Inverse Gamma prior.
This class is appropriate for continuous-valued responses.
Usage::
selector = NormalLikelihoodVariableSelector(dataframe, 'response', ...)
selector.run(T=2000, T_burnin=1000)
print(selector.summary)
The details of the model used in :class:`NormalLikelihoodVariableSelector` are as follows.
The covariates :math:`X` and responses :math:`Y` are defined as follows:
.. math::
X \in \mathbb{R}^{N \times P} \qquad \qquad Y \in \mathbb{R}^{N}
and are provided by the user. The user should put some thought into whether the covariates :math:`X`
and responses :math:`Y` should be centered and/or normalized. This is generally a good idea for the responses
:math:`Y`, but whether pre-processing for :math:`X` is advisable depends on the nature of the dataset.
The inclusion of each covariate is governed by a Bernoulli random variable :math:`\gamma_p`.
In particular :math:`\gamma_p = 0` corresponds to exclusion and :math:`\gamma_p = 1` corresponds to inclusion.
The prior probability of inclusion is governed by :math:`h` or alternatively :math:`S`:
.. math::
h \in [0, 1] \qquad \rm{with} \qquad S \equiv hP
Alternatively, if :math:`h` is not known a priori we can put a prior on :math:`h`:
.. math::
h \sim {\rm Beta}(\alpha, \beta) \qquad \rm{with} \qquad \alpha > 0 \;\;\;\; \beta > 0
    Putting this together, the model specification for an isotropic prior (with an intercept
:math:`\beta_0` included) is as follows:
.. math::
&\gamma_p \sim \rm{Bernoulli}(h) \qquad \rm{for} \qquad p=1,2,...,P
&\sigma^2 \sim \rm{InverseGamma}(\nu_0 / 2, \nu_0 \lambda_0 / 2)
&\beta_0 \sim \rm{Normal}(0, \sigma^2\tau_\rm{intercept}^{-1})
&\beta_\gamma \sim \rm{Normal}(0, \sigma^2 \tau^{-1} \mathbb{1}_\gamma)
&Y_n \sim \rm{Normal}(\beta_0 + X_{n, \gamma} \cdot \beta_\gamma, \sigma^2)
\qquad \rm{for} \qquad n=1,2,...,N
Note that the dimension of :math:`\beta_\gamma` depends on the number of covariates
included in a particular model (i.e. on the number of non-zero entries in :math:`\gamma`).
The hyperparameters :math:`\nu_0` and :math:`\lambda_0` govern the prior over
:math:`\sigma^2`. The default choice :math:`\nu_0=\lambda_0=0` corresponds to an
improper prior :math:`p(\sigma^2) \propto 1/\sigma^2`.
For a gprior the prior over the coefficients (including the intercept :math:`\beta_0` if it is included)
is instead specified as follows:
.. math::
\beta_{\gamma} \sim \rm{Normal}(0, c \sigma^2 (X_\gamma^{\rm{T}} X_\gamma)^{-1})
where :math:`c > 0` is a user-specified hyperparameter.
:param DataFrame dataframe: A `pandas.DataFrame` that contains covariates and responses. Each row
encodes a single data point. All columns apart from the response column (and the columns in `assumed_columns`
if there are any) are assumed to be covariates.
:param str response_column: The name of the column in `dataframe` that contains the continuous-valued responses.
:param list assumed_columns: A list of the names of the columns in `dataframe` that correspond to covariates that
are always assumed to be part of the model. Defaults to []. Note that these columns do not have PIPs,
as they are always included in the model.
:param S: Controls the expected number of covariates to include in the model a priori. Defaults to 5.0.
To specify covariate-level prior inclusion probabilities provide a `pandas.Series` with index that corresponds
to covariate columns in `dataframe` and that specifies covariate-level prior inclusion probabilities.
If a tuple of positive floats `(alpha, beta)` is provided, the a priori inclusion probability is a latent
variable governed by the corresponding Beta prior so that the sparsity level is inferred from the data.
Note that for a given choice of `alpha` and `beta` the expected number of covariates to include in the model
a priori is given by :math:`\frac{\alpha}{\alpha + \beta} \times P`. Also note that the mean number of
covariates in the posterior can vary significantly from prior expectations, since the posterior is in
effect a compromise between the prior and the observed data.
:param str prior: One of the two supported priors for the coefficients: 'isotropic' or 'gprior'.
Defaults to 'isotropic'.
:param bool include_intercept: Whether to include an intercept term. If included the intercept term is
is included in all models so that the corresponding coefficient does not have a PIP. Defaults to True.
:param float tau: Controls the precision of the coefficients in the isotropic prior. Defaults to 0.01.
:param float tau_intercept: Controls the precision of the intercept in the isotropic prior. Defaults to 1.0e-4.
:param float c: Controls the precision of the coefficients in the gprior. Defaults to 100.0.
:param float nu0: Controls the prior over the variance in the Normal likelihood. Defaults to 0.0.
:param float lambda0: Controls the prior over the variance in the Normal likelihood. Defaults to 0.0.
:param str precision: Whether computations should be done with 'single' (i.e. 32-bit) or 'double' (i.e. 64-bit)
floating point precision. Defaults to 'double'. Note that it may be ill-advised to use single precision.
:param str device: Whether computations should be done on CPU ('cpu') or GPU ('gpu'). Defaults to 'cpu'.
:param float explore: This hyperparameter controls how greedy the MCMC algorithm is. Defaults to 5.0.
For expert users only.
:param bool precompute_XX: Whether the covariance matrix :math:`X^{\rm T} X \in \mathbb{R}^{P \times P}`
should be pre-computed. Defaults to False. Note that setting this to True may result in out-of-memory errors
for sufficiently large covariate matrices :math:`X`.
However, if sufficient memory is available, setting precompute_XX to True should be faster.
:param float xi_target: This hyperparameter controls how frequently the MCMC algorithm makes :math:`h` updates
if :math:`h` is a latent variable. Defaults to 0.20. For expert users only.
"""
def __init__(self, dataframe, response_column,
assumed_columns=[],
S=5, prior="isotropic",
include_intercept=True,
tau=0.01, tau_intercept=1.0e-4,
c=100.0,
nu0=0.0, lambda0=0.0,
precision="double", device="cpu",
explore=5, precompute_XX=False,
xi_target=0.2):
if precision not in ['single', 'double']:
raise ValueError("precision must be one of `single` or `double`")
if device not in ['cpu', 'gpu']:
raise ValueError("device must be one of `cpu` or `gpu`")
if response_column not in dataframe.columns:
raise ValueError("response_column must be a valid column in the dataframe.")
if not isinstance(assumed_columns, list) or any([c not in dataframe.columns for c in assumed_columns]):
raise ValueError("assumed_columns must be a list of string names of columns in the dataframe.")
X, Y = dataframe.drop([response_column] + assumed_columns, axis=1), dataframe[response_column]
X_assumed = None if len(assumed_columns) == 0 else dataframe[assumed_columns]
self.X_columns = X.columns.tolist()
self.assumed_columns = assumed_columns
self.include_intercept = include_intercept
if precision == 'single':
X, Y = torch.from_numpy(X.values).float(), torch.from_numpy(Y.values).float()
X_assumed = None if X_assumed is None else torch.from_numpy(X_assumed.values).float()
elif precision == 'double':
X, Y = torch.from_numpy(X.values).double(), torch.from_numpy(Y.values).double()
X_assumed = None if X_assumed is None else torch.from_numpy(X_assumed.values).double()
if device == 'cpu':
X, Y = X.cpu(), Y.cpu()
X_assumed = None if X_assumed is None else X_assumed.cpu()
elif device == 'gpu':
X, Y = X.cuda(), Y.cuda()
X_assumed = None if X_assumed is None else X_assumed.cuda()
if isinstance(S, pd.Series):
if set(self.X_columns) != set(S.index):
raise ValueError("The index of S must match the named columns of dataframe.")
S = torch.from_numpy(S.loc[self.X_columns].values).type_as(X)
self.sampler = NormalLikelihoodSampler(X, Y, X_assumed=X_assumed, S=S, c=c, explore=explore,
precompute_XX=precompute_XX, prior=prior,
tau=tau, tau_intercept=tau_intercept,
compute_betas=True, nu0=nu0, lambda0=lambda0,
include_intercept=include_intercept,
verbose_constructor=False,
xi_target=xi_target)
def run(self, T=2000, T_burnin=1000, verbosity='bar', report_frequency=200, streaming=True, seed=None):
super().run(T=T, T_burnin=T_burnin, verbosity=verbosity, report_frequency=report_frequency,
streaming=streaming, seed=seed)
self.pip = pd.Series(self.container.pip, index=self.X_columns, name="PIP")
column_names = self.X_columns + self.assumed_columns
if self.include_intercept:
column_names += ['Intercept']
self.beta = pd.Series(self.container.beta, index=column_names, name="Coefficient")
self.beta_std = pd.Series(self.container.beta_std, index=column_names, name="Coefficient StdDev")
self.conditional_beta = pd.Series(self.container.conditional_beta, index=column_names,
name="Conditional Coefficient")
self.conditional_beta_std = pd.Series(self.container.conditional_beta_std, index=column_names,
name="Conditional Coefficient StdDev")
self.summary = pd.concat([self.pip, self.beta, self.beta_std,
self.conditional_beta, self.conditional_beta_std], axis=1)
self.stats = {}
populate_alpha_beta_stats(self.container, self.stats)
populate_weight_stats(self, self.stats, self.weights)
if verbosity == 'stdout':
for k, v in self.stats.items():
print('{}: '.format(k), v)
class BinomialLikelihoodVariableSelector(BayesianVariableSelector):
r"""
Bayesian variable selection for a generalized linear model with a Binomial likelihood
and a logistic link function. This class is appropriate for count-valued responses that are bounded.
Usage::
selector = BinomialLikelihoodVariableSelector(dataframe, 'response', 'total_count', ...)
selector.run(T=2000, T_burnin=1000)
print(selector.summary)
The details of the model used in :class:`BinomialLikelihoodVariableSelector` are as follows.
The covariates :math:`X`, responses :math:`Y`, and total counts :math:`T` are defined as:
.. math::
X \in \mathbb{R}^{N \times P} \qquad \qquad Y \in \mathbb{Z}_{\ge 0}^{N}
\qquad \qquad T \in \mathbb{Z}_{\ge 1}^{N}
and are provided by the user. The user should put some thought into whether :math:`X`
should be centered and/or normalized.
The inclusion of each covariate is governed by a Bernoulli random variable :math:`\gamma_p`.
In particular :math:`\gamma_p = 0` corresponds to exclusion and :math:`\gamma_p = 1` corresponds to inclusion.
The prior probability of inclusion is governed by :math:`h` or alternatively :math:`S`:
.. math::
h \in [0, 1] \qquad \rm{with} \qquad S \equiv hP
Alternatively, if :math:`h` is not known a priori we can put a prior on :math:`h`:
.. math::
h \sim {\rm Beta}(\alpha, \beta) \qquad \rm{with} \qquad \alpha > 0 \;\;\;\; \beta > 0
The rest of the model is specified as:
.. math::
&\gamma_p \sim \rm{Bernoulli}(h) \qquad \rm{for} \qquad p=1,2,...,P
&\beta_0 \sim \rm{Normal}(0, \tau_\rm{intercept}^{-1})
&\beta_\gamma \sim \rm{Normal}(0, \tau^{-1} \mathbb{1}_\gamma)
&Y_n \sim \rm{Binomial}(T_n, \sigma(\beta_0 + X_{n, \gamma} \cdot \beta_\gamma))
\qquad \rm{for} \qquad n=1,2,...,N
where :math:`\sigma(\cdot)` is the logistic or sigmoid function and :math:`T_n` denotes the
:math:`n`-th entry of the :math:`N`-dimensional vector of total counts :math:`T`. That is, each
Binomial likelihood is equivalent to :math:`T_n` corresponding Bernoulli likelihoods.
Note that the dimension of :math:`\beta_\gamma` depends on the number of covariates
included in a particular model (i.e. on the number of non-zero entries in :math:`\gamma`).
The intercept :math:`\beta_0` is always included in the model.
:param DataFrame dataframe: A `pandas.DataFrame` that contains covariates and responses. Each row
encodes a single data point. All columns apart from the response and total count column (and the columns
in `assumed_columns` if there are any) are assumed to be covariates.
:param str response_column: The name of the column in `dataframe` that contains the count-valued responses.
:param str total_count_column: The name of the column in `dataframe` that contains the total count
for each data point.
:param list assumed_columns: A list of the names of the columns in `dataframe` that correspond to covariates that
are always assumed to be part of the model. Defaults to []. Note that these columns do not have PIPs,
as they are always included in the model.
:param S: Controls the expected number of covariates to include in the model a priori. Defaults to 5.0.
To specify covariate-level prior inclusion probabilities provide a `pandas.Series` with index that corresponds
to covariate columns in `dataframe` and that specifies covariate-level prior inclusion probabilities.
If a tuple of positive floats `(alpha, beta)` is provided, the a priori inclusion probability is a latent
variable governed by the corresponding Beta prior so that the sparsity level is inferred from the data.
Note that for a given choice of `alpha` and `beta` the expected number of covariates to include in the model
a priori is given by :math:`\frac{\alpha}{\alpha + \beta} \times P`. Also note that the mean number of
covariates in the posterior can vary significantly from prior expectations, since the posterior is in
effect a compromise between the prior and the observed data.
:param float tau: Controls the precision of the coefficients in the isotropic prior. Defaults to 0.01.
:param float tau_intercept: Controls the precision of the intercept in the isotropic prior. Defaults to 1.0e-4.
:param str precision: Whether computations should be done with 'single' (i.e. 32-bit) or 'double' (i.e. 64-bit)
floating point precision. Defaults to 'double'. Note that it may be ill-advised to use single precision.
:param str device: Whether computations should be done on CPU ('cpu') or GPU ('gpu'). Defaults to 'cpu'.
:param float explore: This hyperparameter controls how greedy the MCMC algorithm is. Defaults to 5.0.
For expert users only.
:param float xi_target: This hyperparameter controls how frequently the MCMC algorithm makes Polya-Gamma updates
or :math:`h` updates if the latter is a latent variable. Defaults to 0.25. For expert users only.
"""
def __init__(self, dataframe, response_column, total_count_column, assumed_columns=[],
S=5, tau=0.01, tau_intercept=1.0e-4,
precision="double", device="cpu",
explore=5, xi_target=0.25):
if precision not in ['single', 'double']:
raise ValueError("precision must be one of `single` or `double`")
if device not in ['cpu', 'gpu']:
raise ValueError("device must be one of `cpu` or `gpu`")
if response_column not in dataframe.columns:
raise ValueError("response_column must be a valid column in the dataframe.")
if total_count_column not in dataframe.columns:
raise ValueError("total_count_column must be a valid column in the dataframe.")
if not isinstance(assumed_columns, list) or any([c not in dataframe.columns for c in assumed_columns]):
raise ValueError("assumed_columns must be a list of string names of columns in the dataframe.")
X = dataframe.drop([response_column, total_count_column] + assumed_columns, axis=1)
Y = dataframe[response_column]
TC = dataframe[total_count_column]
X_assumed = None if len(assumed_columns) == 0 else dataframe[assumed_columns]
self.X_columns = X.columns.tolist()
self.assumed_columns = assumed_columns
if precision == 'single':
X, Y = torch.from_numpy(X.values).float(), torch.from_numpy(Y.values).float()
TC = torch.from_numpy(TC.values).float()
X_assumed = None if X_assumed is None else torch.from_numpy(X_assumed.values).float()
elif precision == 'double':
X, Y = torch.from_numpy(X.values).double(), torch.from_numpy(Y.values).double()
TC = torch.from_numpy(TC.values).double()
X_assumed = None if X_assumed is None else torch.from_numpy(X_assumed.values).double()
if device == 'cpu':
X, Y, TC = X.cpu(), Y.cpu(), TC.cpu()
X_assumed = None if X_assumed is None else X_assumed.cpu()
elif device == 'gpu':
X, Y, TC = X.cuda(), Y.cuda(), TC.cuda()
X_assumed = None if X_assumed is None else X_assumed.cuda()
if isinstance(S, pd.Series):
if set(self.X_columns) != set(S.index):
raise ValueError("The index of S must match the named columns of dataframe.")
S = torch.from_numpy(S.loc[self.X_columns].values).type_as(X)
self.sampler = CountLikelihoodSampler(X, Y, TC=TC, S=S, X_assumed=X_assumed, explore=explore,
tau=tau, tau_intercept=tau_intercept,
xi_target=xi_target,
verbose_constructor=False)
def run(self, T=2000, T_burnin=1000, verbosity='bar', report_frequency=100, streaming=True, seed=None):
super().run(T=T, T_burnin=T_burnin, verbosity=verbosity,
report_frequency=report_frequency, streaming=streaming, seed=seed)
self.pip = pd.Series(self.container.pip, index=self.X_columns, name="PIP")
column_names = self.X_columns + self.assumed_columns + ['Intercept']
self.beta = | pd.Series(self.container.beta, index=column_names, name="Coefficient") | pandas.Series |
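# A minimal usage sketch for BinomialLikelihoodVariableSelector, following the
# Usage:: block in its docstring. The synthetic data, column names, and
# hyperparameter values below are illustrative assumptions, not part of the
# original module; the class itself is assumed to be in scope as defined above.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
N, P = 200, 10
X_demo = rng.normal(size=(N, P))
total_count = np.full(N, 20)
logits = 1.5 * X_demo[:, 0] - 2.0 * X_demo[:, 3]
y_demo = rng.binomial(total_count, 1.0 / (1.0 + np.exp(-logits)))

demo_df = pd.DataFrame(X_demo, columns=['cov{}'.format(p) for p in range(P)])
demo_df['response'] = y_demo
demo_df['total_count'] = total_count

selector = BinomialLikelihoodVariableSelector(demo_df, 'response', 'total_count',
                                              S=2.0, tau=0.01, device='cpu')
selector.run(T=2000, T_burnin=1000, verbosity='bar')
print(selector.summary)  # posterior inclusion probabilities and coefficient summaries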
from aif360.metrics.mdss.ScoringFunctions.ScoringFunction import ScoringFunction
from aif360.metrics.mdss.generator import get_entire_subset, get_random_subset
import pandas as pd
import numpy as np
class MDSS(object):
def __init__(self, scoring_function: ScoringFunction):
self.scoring_function = scoring_function
def get_aggregates(self, coordinates: pd.DataFrame, outcomes: pd.Series, probs: pd.Series,
current_subset: dict, column_name: str, penalty: float):
"""
Conditioned on the current subsets of values for all other attributes,
compute the summed outcome (observed_sum = \sum_i y_i) and all probabilities p_i
for each value of the current attribute.
Also use additive linear-time subset scanning to compute the set of distinct thresholds
for which different subsets of attribute values have positive scores. Note that the number
of such thresholds will be linear rather than exponential in the arity of the attribute.
:param coordinates: data frame having the covariates/features as columns
:param probs: data series containing the probabilities/expected outcomes
:param outcomes: data series containing the outcomes/observed outcomes
:param current_subset: current subset to compute aggregates
:param column_name: attribute name to scan over
:param penalty: penalty coefficient
:return: dictionary of aggregates, sorted thresholds (roots), observed sum of the subset, array of observed
probabilities
"""
# compute the subset of records matching the current subgroup along all other dimensions
# temp_df includes the covariates x_i, outcome y_i, and predicted probability p_i for each matching record
if current_subset:
to_choose = coordinates[current_subset.keys()].isin(current_subset).all(axis=1)
temp_df = pd.concat([coordinates.loc[to_choose], outcomes[to_choose], probs[to_choose]], axis=1)
else:
temp_df = pd.concat([coordinates, outcomes, probs], axis=1)
# these will be used to keep track of the aggregate values and the distinct thresholds to be considered
aggregates = {}
thresholds = set()
scoring_function = self.scoring_function
# consider each distinct value of the given attribute (column_name)
for name, group in temp_df.groupby(column_name):
# compute the sum of outcomes \sum_i y_i
observed_sum = group.iloc[:, -2].sum()
# all probabilities p_i
probs = group.iloc[:, -1].values
# compute q_min and q_max for the attribute value
exist, q_mle, q_min, q_max = scoring_function.compute_qs(observed_sum, probs, penalty)
# Add to aggregates, and add q_min and q_max to thresholds.
# Note that thresholds is a set so duplicates will be removed automatically.
if exist:
aggregates[name] = {
'q_mle': q_mle,
'q_min': q_min,
'q_max': q_max,
'observed_sum': observed_sum,
'probs': probs
}
thresholds.update([q_min, q_max])
# We also keep track of the summed outcomes \sum_i y_i and the probabilities p_i for the case where _
# all_ values of that attribute are considered (regardless of whether they contribute positively to score).
# This is necessary because of the way we compute the penalty term: including all attribute values, equivalent
# to ignoring the attribute, has the lowest penalty (of 0) and thus we need to score that subset as well.
all_observed_sum = temp_df.iloc[:, -2].sum()
all_probs = temp_df.iloc[:, -1].values
return [aggregates, sorted(thresholds), all_observed_sum, all_probs]
def choose_aggregates(self, aggregates: dict, thresholds: list, penalty: float, all_observed_sum: float,
all_probs: list):
"""
Having previously computed the aggregates and the distinct q thresholds
to consider in the get_aggregates function, we are now ready to choose the best
subset of attribute values for the given attribute.
For each range defined by these thresholds, we will choose all of the positive contributions,
compute the MLE value of q, and the corresponding score.
We then pick the best q and score over all of the ranges considered.
:param aggregates: dictionary of aggregates. For each feature value, it has q_mle, q_min, q_max, observed_sum,
and the probabilities
:param thresholds: sorted thresholds (roots)
:param penalty: penalty coefficient
:param all_observed_sum: sum of observed binary outcomes for all i
:param all_probs: data series containing all the probabilities/expected outcomes
:return: list with the best subset of attribute values for this attribute and its score
"""
# initialize
best_score = 0
best_names = []
scoring_function = self.scoring_function
# for each threshold
for i in range(len(thresholds) - 1):
threshold = (thresholds[i] + thresholds[i + 1]) / 2
observed_sum = 0.0
probs = []
names = []
# keep only the aggregates which have a positive contribution to the score in that q range
# we must keep track of the sum of outcome values as well as all predicted probabilities
for key, value in aggregates.items():
if (value['q_min'] < threshold) & (value['q_max'] > threshold):
names.append(key)
observed_sum += value['observed_sum']
probs = probs + value['probs'].tolist()
if len(probs) == 0:
continue
# compute the MLE value of q, making sure to only consider the desired direction (positive or negative)
probs = np.asarray(probs)
current_q_mle = scoring_function.qmle(observed_sum, probs)
# Compute the score for the given subset at the MLE value of q.
# Notice that each included value gets a penalty, so the total penalty
# is multiplied by the number of included values.
current_interval_score = scoring_function.score(observed_sum, probs, penalty * len(names), current_q_mle)
# keep track of the best score, best q, and best subset of attribute values found so far
if current_interval_score > best_score:
best_score = current_interval_score
best_names = names
# Now we also have to consider the case of including all attribute values,
# including those that never make positive contributions to the score.
# Note that the penalty term is 0 in this case. (We are neglecting penalties
# from all other attributes, just considering the current attribute.)
# compute the MLE value of q, making sure to only consider the desired direction (positive or negative)
current_q_mle = scoring_function.qmle(all_observed_sum, all_probs)
# Compute the score for the given subset at the MLE value of q.
# Again, the penalty (for that attribute) is 0 when all attribute values are included.
current_score = scoring_function.score(all_observed_sum, all_probs, 0, current_q_mle)
# Keep track of the best score, best q, and best subset of attribute values found.
# Note that if the best subset contains all values of the given attribute,
# we return an empty list for best_names.
if current_score > best_score:
best_score = current_score
best_names = []
return [best_names, best_score]
def score_current_subset(self, coordinates: pd.DataFrame, probs: pd.Series, outcomes: pd.Series,
current_subset: dict, penalty: float):
"""
Just scores the subset without performing ALTSS.
We still need to determine the MLE value of q.
:param coordinates: data frame having the covariates/features as columns
:param probs: data series containing the probabilities/expected outcomes
:param outcomes: data series containing the outcomes/observed outcomes
:param current_subset: current subset to be scored
:param penalty: penalty coefficient
:return: penalized score of subset
"""
# compute the subset of records matching the current subgroup along all dimensions
# temp_df includes the covariates x_i, outcome y_i, and predicted probability p_i for each matching record
if current_subset:
to_choose = coordinates[current_subset.keys()].isin(current_subset).all(axis=1)
temp_df = | pd.concat([coordinates.loc[to_choose], outcomes[to_choose], probs[to_choose]], axis=1) | pandas.concat |
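# A minimal sketch of driving MDSS.get_aggregates directly on toy data. The
# coordinates, subgroup dict, and penalty are invented for illustration, and the
# Bernoulli scoring function and its import path are assumptions about the
# aif360 version in use (they may differ between releases).
import pandas as pd
from aif360.metrics.mdss.ScoringFunctions.Bernoulli import Bernoulli

coords_demo = pd.DataFrame({'sex': ['M', 'F', 'M', 'F', 'M', 'F'],
                            'age_bin': ['young', 'young', 'old', 'old', 'old', 'young']})
outcomes_demo = pd.Series([1, 0, 1, 1, 0, 0], name='observed')
probs_demo = pd.Series([0.5] * 6, name='expected')

scanner = MDSS(Bernoulli(direction='positive'))
aggregates, thresholds, all_obs_sum, all_probs = scanner.get_aggregates(
    coords_demo, outcomes_demo, probs_demo,
    current_subset={'sex': ['M']},  # condition on one subgroup, scan over age_bin
    column_name='age_bin', penalty=1e-17)
print(sorted(aggregates.keys()), thresholds)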
#!/usr/bin/env python
# coding: utf-8
# # The Claremont Colleges' Semester Start Timeline vs Los Angeles County COVID-19 Trends
#
# ## Semester Start Dates
# * **Fall 2020** - 24 August 2020
# * **Spring 2021** - 25 January 2021
#
# <!--## Last Update
# Tuesday, 3 November 2020 -->
#
# ## Data Sources
# * California Department of Public Health
# * [COVID-19 Cases](https://data.ca.gov/dataset/covid-19-cases/resource/926fd08f-cc91-4828-af38-bd45de97f8c3?filters=county%3ALos+Angeles)
# * [COVID-19 Hospital Data](https://data.ca.gov/dataset/covid-19-hospital-data/resource/42d33765-20fd-44b8-a978-b083b7542225?filters=county%3ALos+Angeles)
# In[ ]:
import locale
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
import requests
import seaborn as sns
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
plt.rcParams.update({'figure.autolayout': True})
sns.set()
CA_CASES_URL = 'https://data.ca.gov/dataset/590188d5-8545-4c93-a9a0-e230f0db7290/resource/926fd08f-cc91-4828-af38-bd45de97f8c3/download/statewide_cases.csv'
CA_CASES_CSV = 'ca_cases.csv'
CA_HOSPITALIZED_URL = 'https://data.ca.gov/dataset/529ac907-6ba1-4cb7-9aae-8966fc96aeef/resource/42d33765-20fd-44b8-a978-b083b7542225/download/hospitals_by_county.csv'
CA_HOSPITALIZED_CSV = 'ca_hospitalized.csv'
COUNTY = 'county'
DATE = 'date'
NEW_CASES = 'newcountconfirmed'
LOS_ANGELES = 'Los Angeles'
NEW_CASES_AVG = 'New Cases, 14 day average'
HOSPITALIZED_CONFIRMED_AVG = 'Hospitalized - Confirmed, 3 day average'
HOSPITALIZED_ALL_AVG = 'Hospitalized - Confirmed and Suspected, 3 day average'
SEMESTER = 'Semester'
DAYS_UNTIL_SEMESTER = 'Days Until Semester Start'
CASE_ROLLING_WINDOW = 14
NEW_CASES_AVG = 'New Cases, {} day average'.format(CASE_ROLLING_WINDOW)
FALL_2020 = 'Fall 2020'
FALL_2020_START = pd.Timestamp('2020-08-24')
FALL_2020_COLOR = sns.color_palette()[0]
SPRING_2021 = 'Spring 2021'
SPRING_2021_START = pd.Timestamp('2021-01-25')
SPRING_2021_COLOR = sns.color_palette()[1]
X_AXIS_LABEL = 'Date (Fall 2020 timeline, Spring 2021 timeline)'
def fetch_ca_dataset(url, output_csv):
r = requests.get(url)
if r.status_code == 200:
with open(output_csv, 'w') as f:
f.write(r.text)
else:
raise ConnectionError('HTTP code not 200')
def days_until_start(row: pd.Series) -> int:
if row[SEMESTER] == FALL_2020:
return (FALL_2020_START - row[DATE]).days
elif row[SEMESTER] == SPRING_2021:
return (SPRING_2021_START - row[DATE]).days
def date_axis_text(x, pos):
td = pd.Timedelta(x, 'days')
fall_equiv, spring_equiv = [
(semester-td).strftime('%b %d') for semester in (FALL_2020_START, SPRING_2021_START)]
return ('{}\n{}'.format(fall_equiv, spring_equiv))
def chart_upper_bound(dep_var_series, tick_step, buffer):
ticks_needed = (dep_var_series.max() + tick_step) // tick_step
return int(tick_step * ticks_needed + buffer)
def chart_lower_bound(upper_bound, ratio, top_value):
return (ratio * upper_bound - top_value) / (ratio - 1)
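# Worked example with illustrative numbers: if dep_var_series.max() == 5200,
# tick_step == 1500 and buffer == 200, chart_upper_bound computes
# (5200 + 1500) // 1500 == 4 ticks, giving an axis top of 1500 * 4 + 200 == 6200.
# chart_lower_bound(6200, 0.2, 600) == (0.2 * 6200 - 600) / (0.2 - 1) == -800,
# i.e. the lower limit is chosen so that top_value (600) sits at 20% of the
# axis height, leaving room below the curves for the legend.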
# In[ ]:
fetch_ca_dataset(CA_CASES_URL, CA_CASES_CSV)
fetch_ca_dataset(CA_HOSPITALIZED_URL, CA_HOSPITALIZED_CSV)
# In[ ]:
df_cases = pd.read_csv(CA_CASES_CSV)
la_cases = df_cases.loc[
df_cases[COUNTY]==LOS_ANGELES
].drop(columns=COUNTY).reset_index(drop=True).copy()
la_cases[DATE] = pd.to_datetime(la_cases[DATE])
# Forward fill new cases for negative new cases day.
la_cases.loc[198, NEW_CASES] = pd.NA
la_cases[NEW_CASES].ffill(inplace=True)
la_cases[NEW_CASES_AVG] = la_cases.loc[:, NEW_CASES].rolling(CASE_ROLLING_WINDOW).mean()
df_hospitalized = pd.read_csv(CA_HOSPITALIZED_CSV).rename(columns={'todays_date': DATE})
la_hospitalized = df_hospitalized.loc[
df_hospitalized[COUNTY]==LOS_ANGELES].drop(columns=COUNTY).reset_index(drop=True)
la_hospitalized.loc[:, DATE] = pd.to_datetime(la_hospitalized.loc[:, DATE])
daily_average = (
('hospitalized_covid_confirmed_patients', HOSPITALIZED_CONFIRMED_AVG),
('hospitalized_covid_patients', HOSPITALIZED_ALL_AVG),
)
for col_day, col_avg in daily_average:
la_hospitalized[col_avg] = la_hospitalized[col_day].rolling(3).mean().round(1)
df_la = pd.merge(la_cases, la_hospitalized, on=DATE).reset_index(drop=True)
df_la[SEMESTER] = df_la.loc[:, DATE].apply(
lambda x: FALL_2020 if x <= FALL_2020_START else SPRING_2021)
df_la[DAYS_UNTIL_SEMESTER] = df_la.apply(days_until_start, 'columns')
df_la = df_la.loc[:, (DATE, SEMESTER, DAYS_UNTIL_SEMESTER,
NEW_CASES_AVG, HOSPITALIZED_CONFIRMED_AVG, HOSPITALIZED_ALL_AVG)]
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 5.5), dpi=300)
rate_multiplier = (10_257_557 / 1e5) / 0.500
substantial_rate, moderate_rate = [rate_multiplier * x for x in (7, 4)]
widespread_color = '#802f67'
substantial_color = '#c43d53'
moderate_color = '#d97641'
widespread_message = 'Closed for in-person lectures'
substantial_message, moderate_message = [
'Lecture capacity limited to {}%'.format(x) for x in (25, 50)]
vertical_pad = 100
horizontal_pad = 5
alpha = 0.75
ax.text(horizontal_pad, substantial_rate+vertical_pad, widespread_message,
ha='right', color=widespread_color, alpha=alpha)
ax.axhline(substantial_rate, color=substantial_color, linestyle='dashed', alpha=alpha)
ax.text(horizontal_pad, substantial_rate-vertical_pad, substantial_message,
ha='right', va='top', color=substantial_color, alpha=alpha)
# ax.axhline(moderate_rate, color=moderate_color, linestyle='dashed', alpha=alpha)
# ax.text(horizontal_pad, moderate_rate-vertical_pad, moderate_message,
# ha='right', va='top', color=moderate_color, alpha=alpha)
ax.set_title('Los Angeles County COVID-19 Transmission before TCC Semester')
sns.lineplot(x=DAYS_UNTIL_SEMESTER, y=NEW_CASES_AVG, hue=SEMESTER, data=df_la, ax=ax)
tick_step = 1500
y_max = chart_upper_bound(df_la[NEW_CASES_AVG], tick_step, 200)
ax.set_yticks(list(range(0, y_max, tick_step)))
ax.set_yticklabels([f'{int(x):n}' if x%3_000==0 else '' for x in ax.get_yticks()])
ax.set_xlabel(X_AXIS_LABEL)
ax.set_ylabel(NEW_CASES_AVG)
ax.set_xlim(120, 0)
ax.xaxis.set_major_formatter(FuncFormatter(date_axis_text))
# ax.set_ylim(moderate_rate-vertical_pad-250, df_la[NEW_CASES_AVG].max()+100)
ax.set_ylim(0, y_max)
ax.legend(loc='upper left', title=SEMESTER)
fig.savefig('docs/semester-start-v-new-cases.png')
fig.show()
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 5), dpi=300)
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_ALL_AVG, 'b--', label='Fall 2020, Confirmed & Suspected',
data=df_la[df_la[SEMESTER] == FALL_2020])
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_CONFIRMED_AVG, 'b-', label='Fall 2020, Confirmed',
data=df_la[df_la[SEMESTER] == FALL_2020])
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_ALL_AVG, '--', color=sns.color_palette()[1],
label='Spring 2021, Confirmed & Suspected', data=df_la[df_la[SEMESTER] == SPRING_2021])
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_CONFIRMED_AVG, color=sns.color_palette()[1], label='Spring 2021, Confirmed',
data=df_la[df_la[SEMESTER] == SPRING_2021])
tick_step = 1000
y_max = chart_upper_bound(df_la[HOSPITALIZED_ALL_AVG], tick_step, 200)
ax.set_yticks(list(range(0, y_max, tick_step)))
ax.set_yticklabels([f'{int(x):n}' if x%2_000==0 else '' for x in ax.get_yticks()])
ax.set_xlabel(X_AXIS_LABEL)
ax.xaxis.set_major_formatter(FuncFormatter(date_axis_text))
ax.set_ylabel('Hospitalized, 3 day average')
ax.set_title('Los Angeles County COVID-19 Hospital Patients before TCC Semester')
ax.set_xlim(120, 0)
legend_top = 600
# ax.axhline(legend_top, color='k')
ax.set_ylim(chart_lower_bound(y_max, .2, legend_top), y_max)
ax.legend(title='Semester, Patient COVID-19 Diagnosis', loc='lower right',
ncol=2, fontsize='small', title_fontsize='small')
fig.savefig('docs/semester-start-v-hospitalized.png')
fig.show()
# In[ ]:
LACDPH_CSV = 'lacdph.csv'
r = requests.get('https://github.com/amhirsch/lac_covid19/raw/master/docs/time-series/aggregate-ts.csv')
if r.status_code == 200:
with open(LACDPH_CSV, 'w') as f:
f.write(r.text)
else:
raise ConnectionError('LACDPH Time Series Unavailable')
# In[ ]:
df_lacdph = pd.read_csv(LACDPH_CSV)
df_lacdph[DATE] = pd.to_datetime(df_lacdph['Date'])
df_lacdph = df_lacdph.loc[df_lacdph[DATE]>=pd.to_datetime('2020-12-01'),
[DATE, 'New cases']].copy().reset_index(drop=True)
df_lacdph[NEW_CASES_AVG] = df_lacdph['New cases'].rolling(14).mean()
# In[ ]:
reopening_threshold = 10 / 100_000 * 10_260_237
fig, ax = plt.subplots(figsize=(8, 5), dpi=300)
sns.lineplot(x=DATE, y=NEW_CASES_AVG, data=df_lacdph, ax=ax)
ax.axhline(reopening_threshold, linestyle='dashed', color='k')
ax.text(pd.to_datetime('2021-01-02'), reopening_threshold+300, 'LACDPH Reopening Waivers')
ax.set_xlim(pd.to_datetime('2021-01-01'), | pd.to_datetime('2021-03-12') | pandas.to_datetime |
# https://www.cnblogs.com/mtcnn/p/9411597.html
# https://machinelearningmastery.com/convert-time-series-supervised-learning-problem-python/
import os
import glob
from sklearn.preprocessing import MinMaxScaler
import sys
import seaborn as sns
import pandas as pd
import numpy as np
# from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
# from keras.models import Sequential
# from keras.layers import Dense, LSTM, Dropout
import matplotlib.pyplot as plt
# % matplotlib inline # only valid in jupyter
columns = ['YEAR', 'MONTH', 'DAY', 'TEMP_HIG',
'TEMP_COL', 'AVG_TEMP', 'AVG_WET', 'DATA_COL']
data = pd.read_csv(
'/Users/johnsaxon/test/github.com/learn-tensorflow/data/industry_timeseries/timeseries_train_data/1.csv', names=columns)
# print(data.head())
# print(data.shape)
plt.figure(figsize=(24, 8))
for i in range(8):
plt.subplot(8, 1, i+1)
plt.plot(data.values[:, i])
plt.title(columns[i], y=0.5, loc='right')
# plt.show()
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# print(cols.head())
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# print(cols.head())
# put it all together
agg = | pd.concat(cols, axis=1) | pandas.concat |
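# A small illustration of the shift-based reframing that series_to_supervised
# builds (column naming and the final dropna follow the machinelearningmastery
# post linked above; the toy sequence here is an invented example).
import pandas as pd

toy = pd.DataFrame({'var1': [10, 20, 30, 40, 50]})
framed = pd.concat([toy.shift(1), toy], axis=1)
framed.columns = ['var1(t-1)', 'var1(t)']
framed = framed.dropna()
# each row now pairs the previous observation with the current one:
#    var1(t-1)  var1(t)
# 1       10.0       20
# 2       20.0       30  ...
print(framed)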
import yaml
import pandas as pd
import numpy as np
from os.path import join
from os import makedirs
import glob
import sys
import re
def parse_samplesheet(fp_samplesheet):
#print(fp_samplesheet.split('/')[-1])
# in a first iteration, open the file, read line by line and determine start
# of sample information by looking for a line starting with "[Data]".
# the following lines will be sample information, about lines are header infos.
row_sampleinformation = None
row_reads = None
with open(fp_samplesheet, "r") as f:
for linenumber, line in enumerate(f.readlines()):
if line.startswith("[Data]"):
row_sampleinformation = linenumber+1
elif line.startswith("[Reads]"):
row_reads = linenumber+1
if row_sampleinformation is None:
raise ValueError("Could not find [Data] line in file '%s'." % fp_samplesheet)
if row_reads is None:
raise ValueError("Could not find [Reads] line in file '%s'." % fp_samplesheet)
header = pd.read_csv(fp_samplesheet, sep=",", nrows=row_reads-2, index_col=0).dropna(axis=1, how="all").dropna(axis=0, how="all")
#header = header.set_index(header.columns[0])
header.index = list(map(lambda x: 'header_%s' % x, header.index))
header = header.dropna(axis=0, how="any")
header = header.T.reset_index()
del header['index']
# a second iteration parses sample information via pandas
ss = pd.read_csv(fp_samplesheet, sep=",", skiprows=row_sampleinformation, dtype={'Sample_Name': str, 'Sample_ID': str, 'spike_entity_id': str})
# bcl2fasta automatically changes - into _ char in output filenames
idx_rawilluminainput = ss[pd.notnull(ss['Lane'])].index
for f in ['Sample_ID', 'Sample_Name', 'Sample_Project']:
ss.loc[idx_rawilluminainput, f] = ss.loc[idx_rawilluminainput, f].apply(lambda x: x.replace('-', '_') if type(x) != float else x)
# bcl2fastq uses a S%03i index to address samples.
# They are numbered as occurring in the samplesheet order starting with 1.
# However, the number is not increased if Sample_ID was already seen.
uidx = dict()
for _, sample_id in ss['Sample_ID'].iteritems():
if sample_id not in uidx:
uidx[sample_id] = len(uidx) + 1
ss['s-idx'] = ss['Sample_ID'].apply(lambda x: uidx[x])
ss['run'] = fp_samplesheet.split('/')[-1].replace('_spike.csv', '')
# TODO: ensure that sample names do not clash when not considering s-idx!
# fastq-prefix
fp_fastqs = []
for idx, row in ss.iterrows():
fp_fastq = ''
if pd.notnull(row['Sample_Project']):
fp_fastq = row['Sample_Project']
if pd.notnull(row['Sample_Name']):
fp_fastq = join(fp_fastq, row['Sample_ID'])
fp_fastqs.append(join(fp_fastq,
'%s' % (
row['Sample_Name'] if pd.notnull(
row['Sample_Name']) else row['Sample_ID'])))
ss['fastq-prefix'] = fp_fastqs
# remove samples that are marked to be ignored
if 'spike_ignore_sample' in ss.columns:
ss = ss[pd.isnull(ss['spike_ignore_sample'])]
if 'spike_notes' not in ss.columns:
ss['spike_notes'] = None
# merge with header information
if not all([c not in ss.columns for c in header.columns]):
raise ValueError("Header name conflicts with sample column in '%s'." % fp_samplesheet)
for c in header.columns:
ss[c] = header[c].iloc[0]
return ss
def validate_samplesheet(ss: pd.DataFrame, config, line_offset: int=22, err=sys.stderr):
"""Checks if sample sheet is valid.
Parameters
----------
ss : pd.DataFrame
Samplesheet to be validated.
config : dict from YAML
Snakemake configuration file holding information about projects.
line_offset : int
Default: 22.
To give user information about problematic lines, we need to go back
to the file (not the DataFrame) to address the correct line.
err : IO.stream
Default: sys.stderr
Stream onto which warnings are written.
Returns
-------
[str] : List of warnings
Raises
------
ValueError if errors are found in the sample sheet.
"""
errors = []
warnings = []
# ensure all needed columns are in the table
exp_columns = {'Lane', 'Sample_ID', 'Sample_Name', 'I7_Index_ID', 'index',
'Sample_Project', 'spike_entity_id', 'spike_entity_role'}
if len(exp_columns - set(ss.columns)) > 0:
errors.append(
'Samplesheet is missing column(s): "%s".' %
'", "'.join(sorted(exp_columns - set(ss.columns))))
# ensure to only use [A-z0-9_] in identifiers
allowedChars = re.compile("^[A-z0-9_]*$")
for field in ['Sample_ID', 'Sample_Name', 'Sample_Project',
'spike_entity_id', 'spike_entity_role']:
if field in ss:
for idx, x in ss[field].iteritems():
if pd.notnull(x):
if allowedChars.fullmatch(x) is None:
errors.append(
('%s in line %i contains a restricted char'
'acter: "%s". Only a-z A-Z 0-9 and _ are al'
'lowed!') % (field, line_offset+idx, x))
# ensure Sample_Project is not empty
if 'Sample_Project' in ss:
for idx, x in ss['Sample_Project'].iteritems():
if pd.isnull(x) or x.strip() == "":
errors.append('Line %i has an empty Sample_Project.' %
(line_offset+idx))
if len(errors) > 0:
raise ValueError('The following %i errors(s) were found in your sample sheet:\n%s\n' % (len(errors), '\n'.join(['ERROR %i: %s' % (i+1, error) for i, error in enumerate(errors)])))
# check that sample project is describes in config.yaml
for prj in ss['Sample_Project'].unique():
if prj not in config['projects']:
warnings.append(('Sample_Project "%s" is not described in config.'
'yaml. No processing other than demultiplexing w'
'ill be applied.') % (prj))
# check that spike_entity_role is a defined one
exp_roles = { 'patient', 'father', 'mother', 'sibling', 'healthy',
'tumor', 'tumor_patient', 'tumor_father', 'tumor_mother', 'tumor_sibling'}
for idx, row in ss.iterrows():
if pd.notnull(row['spike_entity_role']):
if row['spike_entity_role'] not in exp_roles:
warnings.append('spike_entity_role "%s" in line %i for Sample_Project "%s" is unknown!' % (row['spike_entity_role'], line_offset+idx, row['Sample_Project']))
# test that entity name is infix of sample name
for idx, row in ss.iterrows():
if pd.notnull(row['spike_entity_id']):
if row['spike_entity_id'] not in row['Sample_ID']:
warnings.append('spike_entity_id "%s" is not part of the Sample_ID "%s" in line %i.' % (row['spike_entity_id'], row['Sample_ID'], line_offset+idx))
# check assumptions about naming schemas per project
exp_names = {'Keimbahn': re.compile("^KB\d{4}"),
'Alps': re.compile("^ALPS")}
for idx, row in ss.iterrows():
if row['Sample_Project'] in exp_names:
if exp_names[row['Sample_Project']].match(row['Sample_ID']) is None:
warnings.append('Sample_ID "%s" does not follow expected naming schema "%s" in line %i.' % (row['Sample_ID'], exp_names[row['Sample_Project']].pattern, line_offset+idx))
# check assumptions about name suffices
exp_suffices = {'Keimbahn': {'patient': {'_c'},
'father': {'_f'},
'mother': {'_m'}},
'Alps': {'patient': {''},
'father': {'_a', 'a'},
'mother': {'_b', 'b'}},
'Maus_Hauer': {'healthy': {'_c', 'c', '_n', 'n'},
'tumor': {'_t', 't'}}}
for idx, row in ss.iterrows():
if pd.isnull(row['spike_entity_id']):
continue
suffix = row['Sample_ID'][len(row['spike_entity_id']):].lower()
if row['Sample_Project'] in exp_suffices:
for role in exp_suffices[row['Sample_Project']].keys():
if (row['spike_entity_role'] == role) and (suffix not in exp_suffices[row['Sample_Project']][role]):
warnings.append('Sample_ID "%s" does not match expected spike_entity_role "%s" for Sample_Project "%s" in line %i.' % (row['Sample_ID'], row['spike_entity_role'], row['Sample_Project'], line_offset+idx))
# check assumptions about barcodes used by specific wet lab members:
exp_barcodes = {
'Keimbahn': {
'A01': 'ATGCCTAA',
'B01': 'GAATCTGA',
'C01': 'AACGTGAT',
'D01': 'CACTTCGA',
'E01': 'GCCAAGAC',
'F01': 'GACTAGTA',
'G01': 'ATTGGCTC',
'H01': 'GATGAATC'},
'Alps': {
'A01': 'ATGCCTAA',
'B01': 'GAATCTGA',
'C01': 'AACGTGAT',
'D01': 'CACTTCGA',
'E01': 'GCCAAGAC',
'F01': 'GACTAGTA',
'G01': 'ATTGGCTC',
'H01': 'GATGAATC'},
'Maus_Hauer': {
'A02': 'AGCAGGAA',
'B02': 'GAGCTGAA',
'C02': 'AAACATCG',
'D02': 'GAGTTAGC',
'E02': 'CGAACTTA',
'F02': 'GATAGACA',
'G02': 'AAGGACAC',
'H02': 'GACAGTGC'},
}
for idx, row in ss.iterrows():
if row['Sample_Project'] in exp_barcodes:
if row['I7_Index_ID'] not in exp_barcodes[row['Sample_Project']]:
warnings.append('Sample_ID "%s" for Sample_Project "%s" in line %i uses unexpected demultiplexing barcode %s: "%s"' % (row['Sample_ID'], row['Sample_Project'], line_offset+idx, row['I7_Index_ID'], row['index']))
elif (exp_barcodes[row['Sample_Project']][row['I7_Index_ID']] != row['index']):
warnings.append('Sample_ID "%s" for Sample_Project "%s" in line %i uses unexpected combination of index and I7_index_ID %s: "%s"' % (row['Sample_ID'], row['Sample_Project'], line_offset+idx, row['I7_Index_ID'], row['index']))
if len(warnings) > 0:
err.write('The following %i warning(s) are issued by your sample sheet:\n' % len(warnings))
for i, warning in enumerate(warnings):
err.write('warning %i: %s\n' % (i+1, warning))
return warnings
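if __name__ == '__main__':
    # Minimal self-contained sketch of calling validate_samplesheet. The sample
    # sheet row, project name, and config dict below are invented for
    # illustration and only cover the columns the checks above expect.
    demo_ss = pd.DataFrame([{
        'Lane': 1,
        'Sample_ID': 'Fam01_c',
        'Sample_Name': 'Fam01_c',
        'I7_Index_ID': 'A01',
        'index': 'ATGCCTAA',
        'Sample_Project': 'Demo_Project',
        'spike_entity_id': 'Fam01',
        'spike_entity_role': 'patient'}])
    demo_config = {'projects': {'Demo_Project': {}}}
    # returns a list of warnings (empty here); raises ValueError on hard errors
    validate_samplesheet(demo_ss, demo_config)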
def get_global_samplesheets(dir_samplesheets, config):
# parse all available sample sheets
fps_samplesheets = glob.glob('%s*_spike.csv' % dir_samplesheets)
global_samplesheet = []
for fp_samplesheet in fps_samplesheets:
ss = parse_samplesheet(fp_samplesheet)
global_samplesheet.append(ss)
if len(global_samplesheet) <= 0:
raise ValueError("Could not find a single demultiplexing sample sheet in directory '%s'." % dir_samplesheets)
global_samplesheet = pd.concat(global_samplesheet, sort=False)
return add_aliassamples(global_samplesheet, config)
def write_samplesheet(fp_output, samplesheet):
with open(fp_output, 'w') as f:
# obtain number of data colums, which dictates number of "," in each line
data_cols = ['Lane',
'Sample_ID',
'Sample_Name',
'Sample_Plate',
'Sample_Well',
'I7_Index_ID',
'index']
if 'I5_Index_ID' in samplesheet.columns:
data_cols += ['I5_Index_ID',
'index2']
data_cols += ['Sample_Project',
'Description',
'spike_notes']
# header
f.write('[Header]\n')
header_cols = []
for col in sorted(samplesheet.columns):
if col.startswith('header_'):
if samplesheet[col].dropna().unique().shape[0] <= 0:
continue
if samplesheet[col].dropna().unique().shape[0] > 1:
raise ValueError("Conflicting header information!")
header_cols.append(col)
f.write(samplesheet[header_cols].drop_duplicates().rename(columns={col: col[len('header_'):] for col in header_cols}).T.to_csv(header=False))
# reads & settings
if 'header_kind_of_run' in samplesheet.columns:
f.write('\n')
pattern = re.compile("^(\d+)x(\d+)bp$")
match = pattern.fullmatch(samplesheet['header_kind_of_run'].dropna().unique()[0])
MAP_ADAPTERS = {0: 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCA',
1: 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCA',
'miseq': 'CTGTCTCTTATACACATCT'}
if match is not None:
f.write('[Reads]\n')
for r in range(int(match.group(1))):
f.write('%s\n' % match.group(2))
f.write('\n')
f.write('[Settings]\n')
f.write('ReverseComplement,0\n')
for r in range(int(match.group(1))):
if r > 0:
f.write('AdapterRead%i' % (r+1))
else:
f.write('Adapter')
f.write(',%s\n' % MAP_ADAPTERS['miseq' if '_000000000-' in samplesheet['run'].dropna().unique()[0] else r])
f.write('\n')
# data
f.write('[Data]')
f.write('\n')
f.write(samplesheet[data_cols].sort_values('Lane').fillna('').to_csv(index=False, float_format='%i'))
def split_samplesheets(samples, config, dry=False):
"""Creates (multiple) samplesheets for bcl2fastq according to barcode length.
Parameters
----------
samples : pd.DataFrame
Sample metadata from parsing global samplesheets.
config : dict
Snakemakes config objects
dry : Bool
Default: False
If True, only return filepaths without creating any dirs or files.
Returns
-------
List of created samplesheet filepaths.
"""
if len(samples['run'].unique()) != 1:
raise ValueError('Not all samples belong to one unique run.')
results = []
ss_split = samples.copy()
ss_split['barcode_len'] = ss_split['index'].fillna("").apply(len)
split_by = ['barcode_len']
if ss_split['index'].dropna().shape[0] > 1:
split_by.append('Lane')
for i, (grp, ss_barcode) in enumerate(ss_split.sort_values(by=['barcode_len', 'Lane']).groupby(split_by)):
fp_dir = join(config['dirs']['prefix'], config['dirs']['intermediate'], config['stepnames']['split_demultiplex'], ss_barcode['run'].unique()[0])
fp_samplesheet = join(fp_dir, 'samplesheet_part_%i.csv' % (i+1))
results.append(fp_dir)
if dry is not True:
makedirs(fp_dir, exist_ok=True)
write_samplesheet(fp_samplesheet, ss_barcode)
if dry is True:
return len(results)
else:
return results
def get_role(spike_project, spike_entity_id, spike_entity_role, samplesheets):
"""Returns file path for bam, given project, entity and role (for trio).
Parameters
----------
spike_project : str
Name of project, to avoid entity ID clashes across projects.
spike_entity_id : str
Entity ID for which role needs to be obtained.
spike_entity_role : str
Role of entity ID whose bam filepath shall be returned.
config : snakemake.config
Snakemakes config object to obtain file path of sample sheets.
Returns
-------
str: Filepath of bam file for given entity role.
"""
samples = samplesheets
if spike_entity_role in ['patient', 'mother', 'father', 'sibling']:
# edge case: trios shall be computed not for patient, but for e.g. siblings
# Usecase in Keimbahn project, e.g. KB0164
# 1) check that no regular sample can be found, because of e.g. suffix _s1
if samples[(samples['Sample_Project'] == spike_project) &
(samples['spike_entity_id'] == spike_entity_id)].shape[0] == 0:
alt_samples = samples[(samples['Sample_Project'] == spike_project) &
(samples['Sample_ID'] == spike_entity_id) &
(samples['spike_entity_role'] == 'sibling')]
# 2) test that exactly ONE alternative sample can be found (might be merged across runs/lanes)
if alt_samples[['Sample_ID', 'Sample_Name', 'Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']].drop_duplicates().shape[0] != 1:
raise ValueError('Alternative entity name leads to none or ambiguous sample information!')
if spike_entity_role == 'patient':
return alt_samples['fastq-prefix'].unique()[0]
else:
return get_role(spike_project, alt_samples['spike_entity_id'].unique()[0], spike_entity_role, samplesheets)
elif spike_entity_role in ['tumor', 'healthy']:
# edge case 2: trios might have additional tumor samples (patient, mother, father, siblings are healthy, i.e. germline)
# Usecase in Keimbahn project, e.g. KB0049
if samples[(samples['Sample_Project'] == spike_project) &
(samples['spike_entity_id'] == spike_entity_id)].shape[0] == 0:
alt_samples = samples[(samples['Sample_Project'] == spike_project) &
(samples['Sample_ID'] == spike_entity_id) &
(samples['spike_entity_role'].apply(lambda x: x.split('_')[0] if pd.notnull(x) else "") == 'tumor')]
if alt_samples[['Sample_ID', 'Sample_Name', 'Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']].drop_duplicates().shape[0] != 1:
raise ValueError('Alternative entity name leads to none or ambiguous sample information!')
if spike_entity_role == 'tumor':
return alt_samples['fastq-prefix'].unique()[0]
elif spike_entity_role == 'healthy':
return get_role(spike_project, alt_samples['spike_entity_id'].unique()[0], alt_samples['spike_entity_role'].unique()[0].split('_')[-1], samplesheets)
# select correct project
try:
x = samples[samples['Sample_Project'] == spike_project]
x.iloc[0]
except IndexError:
raise ValueError('Could not find a spike project with name "%s". Available projects are:\n\t%s\n' % (spike_project, '\n\t'.join(sorted(samples['Sample_Project'].unique()))))
else:
samples = x
# select correct entity
try:
x = samples[samples['spike_entity_id'] == spike_entity_id]
x.iloc[0]
except IndexError:
raise ValueError('Could not find a spike entity group with name "%s". Available entities for projects "%s" are:\n\t%s\n' % (spike_entity_id, spike_project, '\n\t'.join(sorted(samples['spike_entity_id'].unique()))))
else:
samples = x
# select correct role
try:
x = samples[samples['spike_entity_role'] == spike_entity_role]
x.iloc[0]
except IndexError:
raise ValueError('Could not find a role "%s" for spike entity group with name "%s". Available roles are:\n\t%s\n' % (spike_entity_role, spike_entity_id, '\n\t'.join(sorted(samples['spike_entity_role'].unique()))))
else:
samples = x
res = {sample['fastq-prefix'] for idx, sample in samples.iterrows()}
if len(res) > 1:
raise ValueError("Stefan, check if use cases can occour with more than one result!\nspike_project: %s, spike_entity_id: %s, spike_entity_role: %s\n%s" % (spike_project, spike_entity_id, spike_entity_role, res))
return list(res)[0]
def get_species(sample, samplesheets, config):
# sample can be a single sample ...
projects = samplesheets[
(samplesheets['fastq-prefix'] == sample) &
(samplesheets['is_alias'] != True)]['Sample_Project'].unique()
# ... or an entity
if len(projects) == 0:
projects = samplesheets[
(samplesheets['Sample_Project'] == sample.split('/')[0]) &
((samplesheets['spike_entity_id'] == sample.split('/')[-1]) |
(samplesheets['Sample_ID'] == sample.split('/')[-1]))]['Sample_Project'].unique()
if len(projects) > 1:
raise ValueError("Ambiguous projects: '%s' for sample '%s'" % (projects, sample))
if projects[0] not in config['projects']:
raise ValueError('Project "%s" is not specified in config.yaml!' % projects[0])
if 'species' not in config['projects'][projects[0]]:
raise ValueError('"species" is not specified for project "%s" in config.yaml!' % projects[0])
return config['projects'][projects[0]]['species']
def get_reference_genome(sample, samplesheets, config):
return config['references']['genomes'][get_species(sample, samplesheets, config)]
def get_reference_knowns(sample, samplesheets, config, _key):
return [k for k in config['references']['knowns'][get_species(sample, samplesheets, config)] if _key in k]
def get_reference_exometrack(sample, samplesheets, config, returnfield='file', debug=False):
# there are three ways to define the capture kit for a sample:
# 1. by adding a column "capture_kit" to the input samplesheet and set the cell value to a capture kit name defined in config.yaml
# 2. by specifing a key "capture_kit" at the project in config.yaml. Value must match one of the defined capture kits in config.yaml
# 3. by specifing the species for the project, which than select the default kit defined in config.yaml for the species.
# 1,2,3 are listed in order of their precedence, i.e. of 1 and 3 is defined, 1 wins.
if 'references' not in config:
raise ValueError("Key 'references' is not defined in config.yaml")
if 'capture_kits' not in config['references']:
raise ValueError('Definition of "capture_kits" in config.yaml is missing.')
capture_kit = None
# use-case 1
if capture_kit is None:
if 'capture_kit' in samplesheets.columns:
sample_capture_kits = samplesheets[(samplesheets['fastq-prefix'] == sample) & (samplesheets['is_alias'] != True)]['capture_kit'].dropna().unique()
if sample_capture_kits.shape[0] > 1:
raise ValueError("Ambiguous per-sample capture-kit definition in samplesheet for sample '%s': '%s'" % (sample, "', '".join(sample_capture_kits)))
if sample_capture_kits.shape[0] == 1:
capture_kit = sample_capture_kits[0]
if debug:
print('per-sample:', sample, capture_kit)
# use-case 2
if capture_kit is None:
sample_project = samplesheets[(samplesheets['fastq-prefix'] == sample) & (samplesheets['is_alias'] != True)]['Sample_Project'].dropna().unique()
if sample_project.shape[0] > 1:
raise ValueError("Ambigious projects for '%s': %s" % (sample, "', '".join(sample_project)))
if sample_project.shape[0] == 0:
raise ValueError("Missing project definition for sample '%s'." % sample)
sample_project = sample_project[0]
if ('projects' in config) and (sample_project in config['projects']) and ('capture_kit' in config['projects'][sample_project]):
capture_kit = config['projects'][sample_project]['capture_kit']
if debug:
print('per-project', sample, sample_project, capture_kit)
# use-case 3
if capture_kit is None:
species = get_species(sample, samplesheets, config)
for kit_name in config['references']['capture_kits']:
if 'default_for_species' in config['references']['capture_kits'][kit_name]:
if config['references']['capture_kits'][kit_name]['default_for_species'] == species:
capture_kit = kit_name
if debug:
print('per-species', sample, kit_name)
if capture_kit is None:
raise ValueError("Could not determine capture kit for sample '%s'." % sample)
# finally, return found capture_kit information
return config['references']['capture_kits'][capture_kit][returnfield]
def get_reference_varscan_somatic(sample, samplesheets, config):
return config['references']['varscan_somatic'][get_species(sample, samplesheets, config)]
######## avoid run
def _run2date(run):
date='%04i/%02i/%02i' % (
int(run.split('_')[0][:2])+2000,
int(run.split('_')[0][3:4]),
int(run.split('_')[0][5:6]))
return date
def get_bwa_mem_header(sample, samplesheets, config):
samples = samplesheets[samplesheets['fastq-prefix'] == sample]
res = []
for run in sorted(samples['run'].dropna().unique()):
res.append(' -R "@RG\\tID:%s\\tCN:Department_of_Pediatric_Oncology_Dusseldorf\\tPU:%s\\tDT:%s\\tPL:ILLUMINA\\tLB:%s\\tSM:%s" ' % (
run,
run.split('_')[-1][1:],
_run2date(run),
get_reference_exometrack(sample, samplesheets, config, returnfield='protocol_name'),
samples['Sample_ID'].dropna().unique()[0],
))
return "".join(res)
def get_demux_samples(samplesheets, config):
# ignore samples aliases
samples = samplesheets[samplesheets['is_alias'] != True]
# remove samples that stem from per sample fastq sources, like Macrogen sequencing
samples = samples[pd.notnull(samples['Lane'])]
return list(samples['run'].unique())
def get_samples(samplesheets, config):
# only consider samples that have some spike_entity_role defined
samples_with_role = samplesheets[pd.notnull(samplesheets['spike_entity_role'])]
samples = []
for sample, g in samples_with_role.groupby(['Sample_Project', 'fastq-prefix']):
samples.append({'Sample_Project': sample[0],
'sample': sample[1],
'spike_entity_id': g['spike_entity_id'].iloc[0]})
return samples
def get_tumorNormalPairs(samplesheets, config, species=None):
# only consider samples that have some spike_entity_role defined
samples_with_role = samplesheets[pd.notnull(samplesheets['spike_entity_role'])]
pairs = []
for pair, g in samples_with_role.groupby(['Sample_Project', 'spike_entity_id']):
# only choose complete pairs
if len(set(g['spike_entity_role'].unique()) & {'healthy','tumor'}) == 2:
if species is not None:
if get_species(g['fastq-prefix'].iloc[0], samplesheets, config) != species:
continue
pairs.append({'Sample_Project': pair[0],
'spike_entity_id': pair[1]})
# add tumor/normal computations for trio-like projects, i.e. Keimbahn, where special samples stem from tumor tissue and
# need to be compared to the normal germline (i.e. healthy) samples, e.g. KB0049
for (project, tumor), g in samples_with_role[samples_with_role['spike_entity_role'].apply(lambda x: x.startswith('tumor_'))].groupby(['Sample_Project', 'Sample_ID']):
if species is not None:
if get_species(g['fastq-prefix'].iloc[0], samplesheets, config) != species:
continue
pairs.append({'Sample_Project': project,
'spike_entity_id': tumor})
return pairs
def get_genepanels(samplesheets, config, prefix):
"""Returns list of gene panel result files.
Parameters
----------
samplesheets : pd.DataFrame
Global samplesheets.
config : dict
Snakemakes config object.
prefix : str
Filepath to prefix directory.
Returns
-------
[str] : List of filepaths for gene panel results that shall be computed.
"""
# collect which panels should be computed for which projects
project_panels = dict()
if 'projects' not in config:
raise ValueError('config.yaml does not contain any projects!')
for project in samplesheets['Sample_Project'].unique():
if project in config['projects']:
if (config['projects'][project] is not None) and ('genepanels' in config['projects'][project]):
project_panels[project] = config['projects'][project]['genepanels']
# for every sample, check which panels have to be computed
to_be_created = []
for project in project_panels.keys():
for panel in project_panels[project]:
to_be_created.extend(
['%s%s%s/%s.yaml/%s/%s.tsv' % (prefix, config['dirs']['intermediate'], config['stepnames']['genepanel_coverage'], panel, project, sample)
for sample
in samplesheets[(samplesheets['Sample_Project'] == project) & (samplesheets['is_alias'] != True)]['Sample_ID'].unique()])
# in addition to the above, also add samples used as aliases
if 'sample_aliases' in config:
for sample in config['sample_aliases']:
if ('roles' in sample) and ('real_id' in sample):
for role in sample['roles']:
if ('Sample_Project' in role) and (role['Sample_Project'] in project_panels):
for panel in project_panels[role['Sample_Project']]:
if ('Sample_Project' in sample['real_id']) and ('Sample_ID' in sample['real_id']):
if samplesheets[(samplesheets['Sample_Project'] == sample['real_id']['Sample_Project']) & (samplesheets['Sample_ID'] == sample['real_id']['Sample_ID'])].shape[0] > 0:
to_be_created.append('%s%s%s/%s.yaml/%s/%s.tsv' % (prefix, config['dirs']['intermediate'], config['stepnames']['genepanel_coverage'], panel, sample['real_id']['Sample_Project'], sample['real_id']['Sample_ID']))
return to_be_created
def add_aliassamples(samplesheets, config):
aliases = []
if (config is not None) and ('sample_aliases' in config):
for alias in config['sample_aliases']:
if not (('roles' in alias) and ('real_id' in alias)):
raise ValueError('Sample alias is not properly defined (missing keys "roles" or "real_id"), check config.yaml file!')
for role in alias['roles']:
if not (('Sample_Project' in alias['real_id']) and ('Sample_ID' in alias['real_id'])):
raise ValueError('Sample alias is not properly defined (missing keys "Sample_Project" or "Sample_ID"), check config.yaml file!')
role['fastq-prefix'] = '%s/%s' % (alias['real_id']['Sample_Project'], alias['real_id']['Sample_ID'])
if 'run' in samplesheets.columns:
role['run'] = '+'.join(samplesheets[(samplesheets['Sample_Project'] == alias['real_id']['Sample_Project']) &
(samplesheets['Sample_ID'] == alias['real_id']['Sample_ID'])]['run'].unique())
role['is_alias'] = True
lanes = samplesheets[(samplesheets['Sample_Project'] == alias['real_id']['Sample_Project']) &
(samplesheets['Sample_ID'] == alias['real_id']['Sample_ID'])]['Lane']
if lanes.shape[0] > 0:
role['Lane'] = lanes.iloc[0]
aliases.append(role)
if len(aliases) > 0:
return pd.concat([samplesheets, pd.DataFrame(aliases)], sort=False)
else:
samplesheets['is_alias'] = np.nan
return samplesheets
def get_trios(samplesheets, config):
trios = []
for trio, g in samplesheets[ | pd.notnull(samplesheets['spike_entity_role']) | pandas.notnull |
import os
import pandas as pd
import numpy as np
for sub in ['data']:
belief_error = | pd.DataFrame() | pandas.DataFrame |
from os import listdir
from os.path import isfile, join
import re
import nltk
from nltk.corpus import stopwords
from string import punctuation
import pymorphy2
import pandas
from collections import Counter
from collections import defaultdict, OrderedDict
import math
import numpy
# nltk.download("stopwords") # used only for first time
russian_stopwords = stopwords.words("russian")
morgh = pymorphy2.MorphAnalyzer()
# def to_normal_form(word):
# p = morgh.parse(word)[0]
# print(p.normal_form)
# return p.normal_form
# Compute the tf of each term
def computeTF(wordDict, bow):
tfDict = {}
bowCount = len(bow)
for word, count in wordDict.items():
tfDict[word] = count/float(bowCount)
return tfDict
# Compute idf
def computeIDF(docList):
import math
idfDict = {}
N = len(docList)
idfDict = dict.fromkeys(docList[0].keys(), 0)
for doc in docList:
for word, val in doc.items():
if val > 0:
idfDict[word] += 1
for word, val in idfDict.items():
idfDict[word] = math.log10(N / float(val))
return idfDict
# Compute tf-idf
def computeTFIDF(tfBow, idfs):
tfidf = {}
for word, val in tfBow.items():
tfidf[word] = val*idfs[word]
return tfidf
files_path = '../files'
files = [f for f in listdir(files_path) if isfile(join(files_path, f))]
print(files)
files_words = []
for file_name in files:
file = open(files_path + '/' + file_name, "r", encoding="utf-8")
file_content = file.read().replace('<b>', ' ')
sentence = re.sub(r"[\n\s.,:–\\?—\-!()/«»'#№{}\[\]→%|+®©\"]+", " ", file_content, flags=re.UNICODE).lower()
sentence = re.sub(r"[\d+]", "", sentence, flags=re.UNICODE)
tokens = [token for token in sentence.split(" ") if token not in russian_stopwords \
and token != " " \
and token.strip() not in punctuation]
files_words.append(tokens)
wordSet = set([item for sublist in files_words for item in sublist])
fileWordDictionaries = []
for i in range(len(files_words)):
fileWordDictionaries.append(dict.fromkeys(wordSet,0))
for word in files_words[i]:
fileWordDictionaries[i][word] += 1
df = pandas.DataFrame(fileWordDictionaries)
tfDictionaries = []
for i in range(len(fileWordDictionaries)):
tfDictionaries.append(computeTF(fileWordDictionaries[i],files_words[i]))
df_TF = pandas.DataFrame(tfDictionaries)
idfs = computeIDF(fileWordDictionaries)
tfIdfDictionaries = []
for i in range(len(tfDictionaries)):
tfIdfDictionaries.append(computeTFIDF(tfDictionaries[i],idfs))
df_TF_IDF = | pandas.DataFrame(tfIdfDictionaries) | pandas.DataFrame |
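# A small self-check of the TF/IDF helpers above on two invented toy documents
# (real runs use the tokenised files loaded earlier in this script).
toy_docs = [['cat', 'sits', 'home'], ['dog', 'sits', 'quiet']]
toy_vocab = set(word for doc in toy_docs for word in doc)
toy_counts = []
for doc in toy_docs:
    counts = dict.fromkeys(toy_vocab, 0)
    for word in doc:
        counts[word] += 1
    toy_counts.append(counts)
toy_tf = [computeTF(toy_counts[i], toy_docs[i]) for i in range(len(toy_docs))]
toy_idf = computeIDF(toy_counts)
toy_tfidf = [computeTFIDF(tf, toy_idf) for tf in toy_tf]
# 'sits' occurs in both documents, so its idf (and hence tf-idf) is 0;
# every other word gets idf = log10(2/1) ~= 0.301.
print(pandas.DataFrame(toy_tfidf))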
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
@pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
msg = ("Resampling from level= or on= selection with a PeriodIndex is"
r" not currently supported, use \.set_index\(\.\.\.\) to"
" explicitly set index")
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, kind=kind, **kwargs)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('meth', ['ffill', 'bfill'])
@pytest.mark.parametrize('conv', ['start', 'end'])
@pytest.mark.parametrize('targ', ['D', 'B', 'M'])
def test_annual_upsample_cases(self, targ, conv, meth, month,
simple_period_range_series):
ts = simple_period_range_series(
'1/1/1990', '12/31/1991', freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
@pytest.mark.parametrize('rule,expected_error_msg', [
('a-dec', '<YearEnd: month=12>'),
('q-mar', '<QuarterEnd: startingMonth=3>'),
('M', '<MonthEnd>'),
('w-thu', '<Week: weekday=3>')
])
def test_not_subperiod(
self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed')
msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they"
" are not sub or super periods").format(expected_error_msg)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
| assert_series_equal(resampled, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import seaborn as sns
import matplotlib.pyplot as plt
import configparser
from dateutil.parser import parse
import os
from sklearn.metrics import roc_auc_score, f1_score, precision_score,\
recall_score, classification_report, accuracy_score
import logging
logger = logging.getLogger(__name__)
# route this module's print() calls through the logger
print = logger.info
def multilabel_from_tags(tag_list):
"""
function to generate pd dataframe for tags based on list of tag strings
tag_list: the raw list of tags from input. each row is "tag1, tag2, tag3..."
"""
# turn tag list strings into list for each row
tag_list = [[tag.strip() for tag in tag_text.split(',')] for tag_text in tag_list]
# obtain unique tags
unique_tags = list(set([tag for tags in tag_list for tag in tags]))
    try:
        unique_tags.remove('')
    except ValueError:
        print("No empty tag found in the unique tag list")
# create df based on tags
tag_dict = {}
for tag in unique_tags:
tag_dict[f"Tag_{tag}"] = [1 if tag in tags else 0 for tags in tag_list]
tag_df = pd.DataFrame(tag_dict)
return tag_df
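# Illustrative usage sketch (the tag strings below are made up, not part of the
# original pipeline):
def _example_multilabel_from_tags():
    demo_df = multilabel_from_tags(["news, politics", "sports", ""])
    # demo_df gains one 0/1 indicator column per unique tag (Tag_news,
    # Tag_politics, Tag_sports); the empty string is dropped beforehand.
    return demo_df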
def create_tag_columns(train_df, tag_col='Tags'):
"""
function to create tags columns for a training dataframe
train_df: pd DataFrame of training text and tags
tag_col: str. Column name of the column that houses the multilabel tags
"""
tag_list = train_df[tag_col].to_list()
tag_df = multilabel_from_tags(tag_list)
train_df = pd.concat([train_df, tag_df], axis=1)
return train_df
def binary_tag_to_tags(text_df, tag_values):
"""
+++INPUT+++
text_df: dataframe with binary tags, fillna with 0
tag_values: array of tag strings
example: tag_values = text_df.columns[2:].values
+++OUTPUT+++
text_df: with Tags column added containing tags
"""
tags_list = []
for row_index in range(len(text_df)):
selector = text_df.loc[row_index, tag_values].values.astype(bool)
selected_tags = tag_values[selector]
tags_string = ", ".join(selected_tags)
tags_list.append(tags_string)
text_df['Tags'] = tags_list
return text_df
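# Illustrative sketch (hypothetical dataframe): binary_tag_to_tags is the inverse
# of the expansion above, collapsing 0/1 tag columns back into a comma-separated
# "Tags" string per row.
def _example_binary_tag_to_tags():
    demo_df = pd.DataFrame({"Text": ["a", "b"], "UID": [1, 2],
                            "Tag_news": [1, 0], "Tag_sports": [0, 1]})
    tag_values = demo_df.columns[2:].values
    return binary_tag_to_tags(demo_df, tag_values)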
def df_to_json_form(sample_df, tag_col='Tags', ui_dir='../input/',
ui_filename='text_tags.json'):
"""
function to save a sampled text df to directory for human tags
sample_df: pd.DataFrame. Has "Text" and "UID" columns
tag_col: str. The expected name of the tags column. Blank fields will be
populated for human input
ui_dir: str. directory of the human input json form
ui_filename: str. file name for the human input. should be in json
"""
try:
assert "Text" in sample_df.columns
assert "UID" in sample_df.columns
    except AssertionError:
print("Make sure the DF has Text and UID columns!")
exit(1)
if tag_col not in sample_df.columns:
print(f"Column {tag_col} not in columns. Adding empty column for it.")
sample_df[tag_col] = ''
sample_df = sample_df.loc[:, ['Text', 'UID', tag_col]]
print("Saving the sampled texts as JSON for human tags")
Path(ui_dir).mkdir(parents=True, exist_ok=True)
sample_df.to_json(f'{ui_dir}{ui_filename}', orient='records', indent=2)
print("Done")
def kmeans_from_proba(scored_df, tsne_fig_name, score_col_prefix='proba_', random_state=0):
print("Extracting tag scores and training KMeans for clusters")
# extract tag scores into np.array
proba_scores = scored_df.loc[:, scored_df.columns.str.startswith(score_col_prefix)].values
# fit and extract kmeans clusters
kmeans = KMeans(n_clusters=proba_scores.shape[1] + 1, random_state=random_state)
kmeans.fit(proba_scores)
clusters = kmeans.predict(proba_scores).reshape((-1, 1))
print("Visualizing tag score-based KMeans clusters with tSNE")
# visualize the clusters using tsne
tsne_xy = TSNE(n_components=2).fit_transform(proba_scores)
visualize_df = pd.DataFrame(
np.concatenate((tsne_xy, clusters), axis=1), columns=['tsne_1', 'tsne_2', 'cluster_id'])
plt.figure(figsize=(10, 6))
sns.scatterplot(data=visualize_df,x='tsne_1',y='tsne_2',hue='cluster_id',
legend="full",alpha=0.5, palette='pastel')
plt.title("KMeans Cluster on TSNE 2D Transformation")
plt.savefig(tsne_fig_name, bbox_inches='tight')
plt.close()
# save cluster info back to scored_df
print("Saving cluster information back to dataframe")
scored_df['cluster_id'] = clusters
return scored_df, kmeans
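# Illustrative usage sketch (figure name is an assumption): scored_df is expected
# to carry one probability column per tag, each prefixed with 'proba_'.
def _example_kmeans_from_proba(scored_df):
    clustered_df, kmeans = kmeans_from_proba(scored_df, "tsne_clusters.png")
    # clustered_df now carries a 'cluster_id' column; kmeans is the fitted model.
    return clustered_df, kmeans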
def sample_by_cluster(scored_df, sample_size, cluster_col='cluster_id', row_key='UID'):
print("Sampling records based on cluster information...")
group_sample_n = sample_size // scored_df[cluster_col].nunique()
sample_df = scored_df.groupby(cluster_col).apply(lambda x: x.sample(n=group_sample_n)).reset_index(drop=True)
unsampled_count = sample_size - sample_df.shape[0]
print(f"A total of {sample_df.shape[0]:,} records were sampled based on clusters.")
if unsampled_count > 0:
print(f"{unsampled_count:,} remaining records are to be sampled from total population.")
        unsampled_ids = scored_df[row_key][~np.isin(scored_df[row_key], sample_df[row_key])]
additional_ids = np.random.choice(unsampled_ids, unsampled_count, replace=False)
additional_df = scored_df.loc[np.isin(scored_df[row_key], additional_ids), :]
sample_df = pd.concat([sample_df, additional_df], ignore_index=True)
sample_df['Tags'] = ''
return sample_df
def sample_by_random(scored_df, sample_size, cluster_col='cluster_id', row_key='UID'):
print("Sampling records based on pure randomness...")
print(f"{sample_size:,} records are to be sampled from total population.")
sample_ids = np.random.choice(scored_df[row_key], sample_size, replace=False)
sample_df = scored_df.loc[np.isin(scored_df[row_key], sample_ids), :].reset_index(drop=True)
sample_df['Tags'] = ''
return sample_df
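# Illustrative sketch (sample_size is arbitrary): both samplers return records with
# an empty 'Tags' column ready for coding; the cluster-based variant stratifies by
# 'cluster_id' first, while the random variant ignores it.
def _example_sampling(scored_df):
    by_cluster = sample_by_cluster(scored_df, sample_size=100)
    by_random = sample_by_random(scored_df, sample_size=100)
    return by_cluster, by_random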
def coder_sim(samples_df, answers_df):
assert "UID" in samples_df.columns
assert "UID" in answers_df.columns
assert "Tags" in samples_df.columns
assert "Tags" in answers_df.columns
samples_df['Tags'] = answers_df.set_index("UID").loc[samples_df.UID, ['Tags']].values.flatten()
print("Samples have been tagged using the provided answers dataframe")
return samples_df
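# Illustrative sketch (both dataframes are assumptions): in simulation mode the
# human coding step is replaced by copying known answers onto the sampled rows.
def _example_coder_sim(samples_df, answers_df):
    # both frames must carry 'UID' and 'Tags' columns
    return coder_sim(samples_df, answers_df)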
class MetaProject(object):
def __init__(self, project_path, rundir='./wrapper_al/'):
"""
Simple MetaProject class to analyze project output
project_path: path to the project folder of the active learning run
rundir: the path where the active learning ran, default './wrapper_al/'
"""
print(">>> Instantiate MetaProject class...")
self.project_path = project_path
self.rundir = rundir
self.cfg_path = os.path.abspath(f'{self.project_path}orchestration_record.cfg')
self.log_path = os.path.abspath(f'{self.project_path}orchestration_log.log')
self._load_config()
self.total_rounds = int(self.config.get('active_learning', 'total_rounds'))
self.round_sample = int(self.config.get('sampling', 'sample_size'))
self.total_sample = self.total_rounds * self.round_sample
# get abspath of the answer file since the exec path of project is different from analytics path
self.answer_file = os.path.abspath(os.path.join(
self.rundir, self.config.get('coder_sim', 'answer_file')))
print(self.answer_file)
self.max_tags = int(self.config.get('training', 'max_tags'))
self.run_sim = int(self.config.get('active_learning', 'run_sim'))
self.run_time = self._parse_log(self.log_path)
        self._gen_tag_sum_df()
def _load_config(self):
print(">>> Loading project orchestration config")
self.config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
self.config.read(self.cfg_path)
def _parse_log(self, log_path):
"""
Method to parse orchestration log file to obtain run duration in seconds
"""
print(">>> Parsing project execution run time")
with open(log_path, 'r') as logfile:
first_line = logfile.readline()
for last_line in logfile:
pass
try:
start_time = parse(first_line[:23])
end_time = parse(last_line[:23])
run_time = (end_time - start_time).seconds
except:
print(">>> Project did not run successfully based on log records!")
run_time = -1
return run_time
def _gen_tag_sum_df(self, tag_col='Tag_'):
"""
Method to generate tag positive ratios of a given DF (stored in JSON format)
"""
print(">>> Reading full dataset...")
df = pd.read_json(self.answer_file, orient='records')
df = create_tag_columns(df)
self.df = df
self.total_records = df.shape[0]
if self.run_sim == 1:
print(">>> Project ran as simulation...")
self.answer_tag_sum_df = df.loc[:, df.columns.str.startswith(tag_col)].sum().sort_values(
ascending=False).reset_index().rename(
{'index':'Tag_Name', 0: 'Pos_Count'}, axis=1)
self.answer_tag_sum_df['Pos_Rate'] = self.answer_tag_sum_df.Pos_Count / df.shape[0]
else:
print(">>> Project ran in real time with manual coders...")
self.answer_tag_sum_df = None
def describe(self):
"""
Method to describe the project with Meta Cfg and Logs
method only loads attributes of the object
"""
print(">>> Composing project high level description...")
self.stmts = []
self.stmts.append('INTRO\n-------')
self.stmts.append(f"\nThis Active Learning Run has a round count of {self.total_rounds:,},")
self.stmts.append(f"and a total of {self.total_sample:,} samples are included for model training.")
if self.run_sim == 1:
self.stmts.append("This run is a simulation with known tags already available.")
else:
self.stmts.append("This run is an actual application with manual coder input for tags on the fly.")
self.stmts.append(f"In each round, {int(self.config.get('sampling', 'sample_size')):,} samples are selected as additional training data.")
self.stmts.append(f"While the first round always runs random sampling to gather the samples,")
self.stmts.append(f"the second and beyond rounds use {self.config.get('sampling', 'sampling_method')} method.")
self.stmts.append('\n\nDATA\n-------')
self.stmts.append(f'\nThe input dataframe has a total of {self.total_records:,} records.')
if self.answer_tag_sum_df is not None:
self.stmts.append('The positive rates of each tag in the full answer dataset:')
self.stmts.append("\n" + self.answer_tag_sum_df.to_string())
self.stmts.append('\n\nMODELING\n-------')
self.stmts.append("\nThe training config for each round's Bi-Directional LSTM modeling is as below:")
for key, value in dict(self.config['training']).items():
self.stmts.append(f"\n\t{key}: {value}")
if self.config.get('training', 'random_embed') == 'True':
self.stmts.append('\nThe text embeddings are randomly initiated 300-length via Tensorflow 2.')
else:
self.stmts.append('\nThe text embeddings are GloVe 300-length text embeddings loaded via Spacy.')
self.stmts.append('\n\nRUNTIME\n-------')
if self.run_time > 0:
self.stmts.append(f"\nExecution of the run took {self.run_time / 60:,.2f} minutes to complete")
else:
self.stmts.append("Program log file indicates that this run was not successfully executed...")
self.description = " ".join(self.stmts)
print(">>> Displaying the description:")
print(self.description)
class RoundResult(object):
def __init__(self, round_path, answer_file, proba_cutoff, rundir='./wrapper_al/'):
self.round_path = os.path.abspath(os.path.join(rundir, round_path))
print(self.round_path)
self.config_dir = f"{self.round_path.rstrip('/')}/config/"
self.sample_dir = f"{self.round_path.rstrip('/')}/sample/"
self.label_dir = f"{self.round_path.rstrip('/')}/label/"
self.input_dir = f"{self.round_path.rstrip('/')}/input/"
self.output_dir = f"{self.round_path.rstrip('/')}/output/"
self.train_file = f"{self.output_dir.rstrip('/')}/train_df.csv"
self.scored_file = f"{self.output_dir.rstrip('/')}/scored/scored_output.json"
self.answer_file = os.path.abspath(os.path.join(rundir, answer_file))
self.proba_cutoff = proba_cutoff
self.load_outputs()
def load_outputs(self, proba_prefix='proba_', tag_prefix='Tag_', row_key='UID'):
# read the round related datasets
train_df = pd.read_csv(self.train_file)
scored_df = | pd.read_json(self.scored_file, orient='records') | pandas.read_json |
import torch
import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import os
from utils import set_seed, parse_training_args
from dataset import ToxicDataset, PairedToxicDataset
from trainer import PairedTrainer
import wandb
if __name__ == "__main__":
args = parse_training_args()
config = vars(args)
if config["use_extra_data"]:
extra_files = [
os.path.join(config["extra_data_dir"], f)
for f in os.listdir(config["extra_data_dir"])
if f.endswith(".csv")
]
config["extra_files"] = extra_files
wandb.login()
fold = args.fold if args.fold is not None else 0
with wandb.init(
project="jigsaw-paired-train",
group=str(args.group_id),
name=f"{args.group_id}-{args.checkpoint}-fold-{fold}",
config=config,
):
config = wandb.config
set_seed(config.seed)
data = | pd.read_csv(config.train_path) | pandas.read_csv |
'''
Created on Jan 15, 2020
@author: bsana
'''
from os.path import join
import sys,datetime
import pandas as pd
OUT_SEP = ' '
COUNTY_FIPS = [37,59]
if __name__ == '__main__':
if len(sys.argv)<2:
print('Please provide a control file which contains all the required input parameters as an argument!')
else:
print('Reformat survey program started: {}'.format(datetime.datetime.now()))
#Initiate log file
logfilename = 'reformat_survey.log'
logfile = open(logfilename,'w')
logfile.write('Reformat survey program started: ' + str(datetime.datetime.now()) + '\n')
inputctlfile = sys.argv[1]
ctlfile = open(inputctlfile)
for ctlfileline in ctlfile:
logfile.write(ctlfileline)
if len(str.split(ctlfileline))>1:
param = (str.split(ctlfileline)[0]).upper()
value = str.split(ctlfileline)[1]
if param == 'INDIR':
inputdir = value
elif param == 'INHHFILE':
inhhfilename = value
elif param == 'INPERFILE':
inperfilename = value
elif param == 'INTRIPFILE':
intripfilename = value
elif param == 'OUTDIR':
outputdir = value
elif param == 'OUTHHFILE':
outhhfilename = value
elif param == 'OUTPERFILE':
outperfilename = value
elif param == 'OUTTRIPFILE':
outtripfilename = value
inhhfilename = join(inputdir, inhhfilename)
inperfilename = join(inputdir, inperfilename)
intripfilename = join(inputdir, intripfilename)
outhhfilename = join(outputdir, outhhfilename)
outperfilename = join(outputdir, outperfilename)
outtripfilename = join(outputdir, outtripfilename)
###### Household file processing
print('Household file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Household file processing started: ' + str(datetime.datetime.now()) + '\n')
hh = pd.read_csv(inhhfilename, sep='\t')
hh['hhno'] = hh['hh_id']
hh['hhsize'] = hh['num_people']
hh.loc[hh['hhsize']>900, 'hhsize'] = -1
hh['hhvehs'] = hh['num_vehicles']
hh.loc[hh['hhvehs']>900, 'hhvehs'] = -1
INC1_DICT = {999:-1, 1:7500, 2:20000, 3:30000, 4:42500, 5:62500, 6:87500, 7:125000, 8:175000, 9:225000, 10:350000}
hh['hhincome'] = hh['income_detailed'].map(INC1_DICT)
INC2_DICT = {999:-1, 1:12500, 2:37500, 3:62500, 4:87500, 5:175000, 6:350000}
hh['hhinc2'] = hh['income_followup'].map(INC2_DICT)
hh.loc[(hh['hhincome']<0) & (hh['hhinc2']>0), 'hhincome'] = hh.loc[(hh['hhincome']<0) & (hh['hhinc2']>0), 'hhinc2']
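        # Worked example of the two-step income imputation above (hypothetical row):
        # income_detailed == 999 ('prefer not to answer') maps hhincome to -1; if that
        # household answered the follow-up with income_followup == 3, the midpoint
        # 62500 from INC2_DICT overwrites it, so hhincome ends up as 62500.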
hh['hownrent'] = hh['rent_own']
hh.loc[hh['hownrent']==997, 'hownrent'] = 3 #Other
hh.loc[hh['hownrent']==999, 'hownrent'] = 9 #Prefer not to answer -> Missing
hh.loc[hh['hownrent']<0, 'hownrent'] = -1
RESTYPE_DICT = {1:1, 2:2, 3:3, 4:3, 5:3, 6:5, 7:4, 997:6}
hh['hrestype'] = hh['res_type'].map(RESTYPE_DICT)
hh.loc[pd.isnull(hh['hrestype']), 'hrestype'] = -1
hh['hxcord'] = hh['reported_home_lon']
hh['hycord'] = hh['reported_home_lat']
hh['hhtaz'] = hh['home_taz']
hh['hhparcel'] = hh['home_bg_geoid']
int_cols = ['hhparcel','hhtaz','hhincome','hrestype']
hh[int_cols] = hh[int_cols].astype('int64')
out_colnames = ['hhno','hhsize','hhvehs','hhincome','hownrent','hrestype','hhparcel','hhtaz','hxcord','hycord','wt_alladult_wkday','wt_alladult_7day']
hh = hh[out_colnames]
hh = hh.sort_values('hhno')
hh.to_csv(outhhfilename, sep=OUT_SEP, index=False)
print('Household file processing finished: {}'.format(datetime.datetime.now()))
logfile.write('Household file processing finished: ' + str(datetime.datetime.now()) + '\n')
###### Person file processing
print('Person file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Person file processing started: ' + str(datetime.datetime.now()) + '\n')
per = pd.read_csv(inperfilename, sep='\t')
per['person_id'] = per['person_id'].round()
per['hhno'] = per['hh_id']
per['pno'] = per['person_num']
AGE_DICT = {1:3, 2:10, 3:16, 4:21, 5:30, 6:40, 7:50, 8:60, 9:70, 10:80}
per['pagey'] = per['age'].map(AGE_DICT)
GEND_DICT = {1:2, 2:1, 3:3, 4:3, 997:3, 995:9, 999:9}
per['pgend'] = per['gender'].map(GEND_DICT)
per.loc[per['pgend']<0, 'pgend'] = -1
per.loc[pd.isna(per['pgend']), 'pgend'] = -1
per['pptyp'] = 0
per['pwtyp'] = 0
per['pstyp'] = 0
per.loc[(per['pagey']>=0) & (per['pagey']<5), 'pptyp'] = 8
per.loc[(per['pagey']>=0) & (per['pagey']<16) & (per['pptyp']==0), 'pptyp'] = 7
per.loc[(per['employment']==1) & (per['hours_work'].isin([1,2,3])) & (per['pptyp']==0), 'pptyp'] = 1
per.loc[(per['pagey']>=16) & (per['pagey']<18) & (per['pptyp']==0), 'pptyp'] = 6
per.loc[(per['pagey']>=16) & (per['pagey']<25) & (per['school_type'].isin([4,7])) & (per['student']==1) & (per['pptyp']==0), 'pptyp'] = 6
per.loc[(per['student'].isin([1,2])) & (per['pptyp']==0), 'pptyp'] = 5
per.loc[(per['employment'].isin([1,2,3])) & (per['pptyp']==0), 'pptyp'] = 2 # Remaining workers are part-time
per.loc[(per['pagey']>65) & (per['pptyp']==0), 'pptyp'] = 3
per.loc[per['pptyp']==0, 'pptyp'] = 4
per.loc[per['pptyp']==1, 'pwtyp'] = 1
per.loc[per['pptyp']==2, 'pwtyp'] = 2
# student workers are also part-time workers
per.loc[(per['pptyp']==5) & (per['employment'].isin([1,2,3])), 'pwtyp'] = 2
per.loc[(per['pptyp']==6) & (per['employment'].isin([1,2,3])), 'pwtyp'] = 2
per.loc[per['student']==1, 'pstyp'] = 1
per.loc[per['student']==2, 'pstyp'] = 2
per['pwxcord'] = per['work_lon']
per['pwycord'] = per['work_lat']
per['psxcord'] = per['school_lon']
per['psycord'] = per['school_lat']
per['ppaidprk'] = 1
per.loc[per['work_park']==1, 'ppaidprk'] = 0
per = per.rename(columns={'work_taz':'pwtaz_tmp', 'school_taz':'pstaz_tmp',
'work_bg_geo_id':'pwpcl_tmp', 'school_bg_geo_id':'pspcl_tmp'})
per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwtaz'] = per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwtaz_tmp']
per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwpcl'] = per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwpcl_tmp']
per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pstaz'] = per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pstaz_tmp']
per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pspcl'] = per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pspcl_tmp']
per.loc[ | pd.isnull(per['pwtaz']) | pandas.isnull |
#python librairies
# Data manipulation
import numpy as np
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
import seaborn
import matplotlib.mlab as mlab
# Statistical calculation
from scipy.stats import norm
# Tabular data output
from tabulate import tabulate
from pandas_datareader import data as web
from datetime import datetime
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import yfinance as yf
import statsmodels.api as sm
from statsmodels import regression
plt.style.use('fivethirtyeight')
# ------------------------------------------------------------------------------------------
def graph_close(stock, start_date, end_date):
"""
Source and plot Close prices from yahoo for any given stock/s & period
Parameters
----------
stock : str,list
Either a single stock ticker or list of tickers.
start_date : str
Date in yyyy-mm-dd format
end_date : str
Date in yyyy-mm-dd format
"""
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Close']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Close Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def graph_open(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Open']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Open Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def graph_volume(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Volume']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Close Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def graph_adj_close(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Adj Close']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Close Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def close(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Close']
df = pd.DataFrame(df)
return df
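# Illustrative usage sketch (tickers and dates are arbitrary):
# prices = close(['AAPL', 'MSFT'], '2020-01-01', '2020-12-31')
# returns a DataFrame of daily Close prices, one column per ticker.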
# ------------------------------------------------------------------------------------------
def open(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Open']
df = | pd.DataFrame(df) | pandas.DataFrame |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing utils."""
import io
import os
import pathlib
import random
import shutil
import tempfile
from absl.testing import parameterized
from keras import layers
from keras.engine import sequential
from keras.preprocessing import image
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
import numpy as np
import pandas as pd
import tensorflow.compat.v2 as tf
try:
import PIL # pylint:disable=g-import-not-at-top
except ImportError:
PIL = None
def _generate_test_images(include_rgba=False,
include_16bit=False,
include_32bit=False):
img_w = img_h = 20
rgb_images = []
rgba_images = []
gray_images = []
gray_images_16bit = []
gray_images_32bit = []
for _ in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
# RGB
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = PIL.Image.fromarray(imarray.astype('uint8')).convert('RGB')
rgb_images.append(im)
# RGBA
imarray = np.random.rand(img_w, img_h, 4) * variance + bias
im = PIL.Image.fromarray(imarray.astype('uint8')).convert('RGBA')
rgba_images.append(im)
# 8-bit grayscale
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = PIL.Image.fromarray(imarray.astype('uint8').squeeze()).convert('L')
gray_images.append(im)
# 16-bit grayscale
imarray = np.array(
np.random.randint(-2147483648, 2147483647, (img_w, img_h)))
im = PIL.Image.fromarray(imarray.astype('uint16'))
gray_images_16bit.append(im)
# 32-bit grayscale
im = PIL.Image.fromarray(imarray.astype('uint32'))
gray_images_32bit.append(im)
ret = [rgb_images, gray_images]
if include_rgba:
ret.append(rgba_images)
if include_16bit:
ret.append(gray_images_16bit)
if include_32bit:
ret.append(gray_images_32bit)
return ret
@test_utils.run_v2_only
class TestImage(test_combinations.TestCase):
def test_iterator_empty_directory(self):
# Testing with different batch sizes
for batch_size in [0, 32]:
data_iterator = image.Iterator(0, batch_size, False, 0)
ret = next(data_iterator.index_generator)
self.assertEqual(ret.size, 0)
def test_image(self):
if PIL is None:
return # Skip test if PIL is not available.
for test_images in _generate_test_images():
img_list = []
for im in test_images:
img_list.append(image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.,
brightness_range=(1, 5),
fill_mode='nearest',
cval=0.5,
horizontal_flip=True,
vertical_flip=True)
# Basic test before fit
x = np.random.random((32, 10, 10, 3))
generator.flow(x)
# Fit
generator.fit(images, augment=True)
for x, _ in generator.flow(
images, np.arange(images.shape[0]), shuffle=True):
self.assertEqual(x.shape[1:], images.shape[1:])
break
def test_image_with_split_value_error(self):
with self.assertRaises(ValueError):
image.ImageDataGenerator(validation_split=5)
def test_image_invalid_data(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format='channels_last')
# Test fit with invalid data
with self.assertRaises(ValueError):
x = np.random.random((3, 10, 10))
generator.fit(x)
# Test flow with invalid data
with self.assertRaises(ValueError):
generator.flow(np.arange(5))
# Invalid number of channels: will work but raise a warning
x = np.random.random((32, 10, 10, 5))
generator.flow(x)
with self.assertRaises(ValueError):
generator = image.ImageDataGenerator(data_format='unknown')
generator = image.ImageDataGenerator(zoom_range=(2., 2.))
def test_image_fit(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format='channels_last')
# Test grayscale
x = np.random.random((32, 10, 10, 1))
generator.fit(x)
    # Test RGB
x = np.random.random((32, 10, 10, 3))
generator.fit(x)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format='channels_first')
# Test grayscale
x = np.random.random((32, 1, 10, 10))
generator.fit(x)
    # Test RGB
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
def test_directory_iterator(self):
if PIL is None:
return # Skip test if PIL is not available.
num_classes = 2
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = 'class-{}'.format(cl)
classpaths = [
class_directory,
os.path.join(class_directory, 'subfolder-1'),
os.path.join(class_directory, 'subfolder-2'),
os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
]
for path in classpaths:
os.mkdir(os.path.join(temp_dir, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in _generate_test_images():
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(classpaths[count % len(classpaths)],
'image-{}.jpg'.format(count))
filenames.append(filename)
im.save(os.path.join(temp_dir, filename))
count += 1
# Test image loading util
fname = os.path.join(temp_dir, filenames[0])
_ = image.load_img(fname)
_ = image.load_img(fname, grayscale=True)
_ = image.load_img(fname, target_size=(10, 10))
_ = image.load_img(fname, target_size=(10, 10), interpolation='bilinear')
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(temp_dir)
# check number of classes and images
self.assertEqual(len(dir_iterator.class_indices), num_classes)
self.assertEqual(len(dir_iterator.classes), count)
self.assertEqual(set(dir_iterator.filenames), set(filenames))
def preprocessing_function(x):
"""This will fail if not provided by a Numpy array.
Note: This is made to enforce backward compatibility.
Args:
x: A numpy array.
Returns:
An array of zeros with the same shape as the given array.
"""
self.assertEqual(x.shape, (26, 26, 3))
self.assertIs(type(x), np.ndarray)
return np.zeros_like(x)
# Test usage as Sequence
generator = image.ImageDataGenerator(
preprocessing_function=preprocessing_function)
dir_seq = generator.flow_from_directory(
str(temp_dir),
target_size=(26, 26),
color_mode='rgb',
batch_size=3,
class_mode='categorical')
self.assertEqual(len(dir_seq), count // 3 + 1)
x1, y1 = dir_seq[1]
self.assertEqual(x1.shape, (3, 26, 26, 3))
self.assertEqual(y1.shape, (3, num_classes))
x1, y1 = dir_seq[5]
self.assertTrue((x1 == 0).all())
def directory_iterator_with_validation_split_test_helper(
self, validation_split):
if PIL is None:
return # Skip test if PIL is not available.
num_classes = 2
tmp_folder = tempfile.mkdtemp(prefix='test_images')
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = 'class-{}'.format(cl)
classpaths = [
class_directory,
os.path.join(class_directory, 'subfolder-1'),
os.path.join(class_directory, 'subfolder-2'),
os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
]
for path in classpaths:
os.mkdir(os.path.join(tmp_folder, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in _generate_test_images():
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(classpaths[count % len(classpaths)],
'image-{}.jpg'.format(count))
filenames.append(filename)
im.save(os.path.join(tmp_folder, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator(validation_split=validation_split)
with self.assertRaises(ValueError):
generator.flow_from_directory(tmp_folder, subset='foo')
num_validation = int(count * validation_split)
num_training = count - num_validation
train_iterator = generator.flow_from_directory(
tmp_folder, subset='training')
self.assertEqual(train_iterator.samples, num_training)
valid_iterator = generator.flow_from_directory(
tmp_folder, subset='validation')
self.assertEqual(valid_iterator.samples, num_validation)
# check number of classes and images
self.assertEqual(len(train_iterator.class_indices), num_classes)
self.assertEqual(len(train_iterator.classes), num_training)
self.assertEqual(
len(set(train_iterator.filenames) & set(filenames)), num_training)
model = sequential.Sequential([layers.Flatten(), layers.Dense(2)])
model.compile(optimizer='sgd', loss='mse')
model.fit(train_iterator, epochs=1)
shutil.rmtree(tmp_folder)
@test_combinations.run_all_keras_modes
def test_directory_iterator_with_validation_split_25_percent(self):
self.directory_iterator_with_validation_split_test_helper(0.25)
@test_combinations.run_all_keras_modes
def test_directory_iterator_with_validation_split_40_percent(self):
self.directory_iterator_with_validation_split_test_helper(0.40)
@test_combinations.run_all_keras_modes
def test_directory_iterator_with_validation_split_50_percent(self):
self.directory_iterator_with_validation_split_test_helper(0.50)
def test_img_utils(self):
if PIL is None:
return # Skip test if PIL is not available.
height, width = 10, 8
# Test channels_first data format
x = np.random.random((3, height, width))
img = image.array_to_img(x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_first')
self.assertEqual(x.shape, (3, height, width))
# Test 2D
x = np.random.random((1, height, width))
img = image.array_to_img(x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_first')
self.assertEqual(x.shape, (1, height, width))
# Test channels_last data format
x = np.random.random((height, width, 3))
img = image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 3))
# Test 2D
x = np.random.random((height, width, 1))
img = image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 1))
def test_batch_standardize(self):
if PIL is None:
return # Skip test if PIL is not available.
# ImageDataGenerator.standardize should work on batches
for test_images in _generate_test_images():
img_list = []
for im in test_images:
img_list.append(image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.,
brightness_range=(1, 5),
fill_mode='nearest',
cval=0.5,
horizontal_flip=True,
vertical_flip=True)
generator.fit(images, augment=True)
transformed = np.copy(images)
for i, im in enumerate(transformed):
transformed[i] = generator.random_transform(im)
transformed = generator.standardize(transformed)
def test_img_transforms(self):
x = np.random.random((3, 200, 200))
_ = image.random_rotation(x, 20)
_ = image.random_shift(x, 0.2, 0.2)
_ = image.random_shear(x, 2.)
_ = image.random_zoom(x, (0.5, 0.5))
_ = image.apply_channel_shift(x, 2, 2)
_ = image.apply_affine_transform(x, 2)
with self.assertRaises(ValueError):
image.random_zoom(x, (0, 0, 0))
_ = image.random_channel_shift(x, 2.)
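    # Illustrative sketch (not one of the original assertions):
    #   img = np.random.random((3, 200, 200))
    #   rotated = image.random_rotation(img, 20)  # rotate by up to +/-20 degrees
    # random_rotation's default axis arguments already assume channels_first input,
    # which is why the test above passes (3, 200, 200) arrays directly.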
@test_utils.run_v2_only
class TestImageLoading(test_combinations.TestCase):
def test_validate_filename(self):
tmpdir = self.create_tempdir()
valid_extensions = ('png', 'jpg')
filename = tmpdir.create_file('test.png').full_path
self.assertTrue(image.validate_filename(str(filename), valid_extensions))
filename = tmpdir.create_file('test.PnG').full_path
self.assertTrue(image.validate_filename(str(filename), valid_extensions))
filename = tmpdir.create_file('test.some_extension').full_path
self.assertFalse(image.validate_filename(str(filename), valid_extensions))
self.assertFalse(
image.validate_filename('some_test_file.png', valid_extensions))
def test_load_img(self):
tmpdir = self.create_tempdir()
filename_rgb = os.path.join(tmpdir.full_path, 'rgb_utils.png')
filename_rgba = os.path.join(tmpdir.full_path, 'rgba_utils.png')
filename_grayscale_8bit = os.path.join(tmpdir.full_path,
'grayscale_8bit_utils.png')
filename_grayscale_16bit = os.path.join(tmpdir.full_path,
'grayscale_16bit_utils.tiff')
filename_grayscale_32bit = os.path.join(tmpdir.full_path,
'grayscale_32bit_utils.tiff')
original_rgb_array = np.array(
255 * np.random.rand(100, 100, 3), dtype=np.uint8)
original_rgb = image.array_to_img(original_rgb_array, scale=False)
original_rgb.save(filename_rgb)
original_rgba_array = np.array(
255 * np.random.rand(100, 100, 4), dtype=np.uint8)
original_rgba = image.array_to_img(original_rgba_array, scale=False)
original_rgba.save(filename_rgba)
original_grayscale_8bit_array = np.array(
255 * np.random.rand(100, 100, 1), dtype=np.uint8)
original_grayscale_8bit = image.array_to_img(
original_grayscale_8bit_array, scale=False)
original_grayscale_8bit.save(filename_grayscale_8bit)
original_grayscale_16bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)),
dtype=np.int16)
original_grayscale_16bit = image.array_to_img(
original_grayscale_16bit_array, scale=False, dtype='int16')
original_grayscale_16bit.save(filename_grayscale_16bit)
original_grayscale_32bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)),
dtype=np.int32)
original_grayscale_32bit = image.array_to_img(
original_grayscale_32bit_array, scale=False, dtype='int32')
original_grayscale_32bit.save(filename_grayscale_32bit)
# Test that loaded image is exactly equal to original.
loaded_im = image.load_img(filename_rgb)
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgb_array.shape)
self.assertAllClose(loaded_im_array, original_rgb_array)
loaded_im = image.load_img(filename_rgba, color_mode='rgba')
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgba_array.shape)
self.assertAllClose(loaded_im_array, original_rgba_array)
loaded_im = image.load_img(filename_rgb, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(
loaded_im_array.shape,
(original_rgb_array.shape[0], original_rgb_array.shape[1], 1))
loaded_im = image.load_img(filename_grayscale_8bit, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_grayscale_8bit_array.shape)
self.assertAllClose(loaded_im_array, original_grayscale_8bit_array)
loaded_im = image.load_img(filename_grayscale_16bit, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im, dtype='int16')
self.assertEqual(loaded_im_array.shape,
original_grayscale_16bit_array.shape)
self.assertAllClose(loaded_im_array, original_grayscale_16bit_array)
# test casting int16 image to float32
loaded_im_array = image.img_to_array(loaded_im)
self.assertAllClose(loaded_im_array, original_grayscale_16bit_array)
loaded_im = image.load_img(filename_grayscale_32bit, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im, dtype='int32')
self.assertEqual(loaded_im_array.shape,
original_grayscale_32bit_array.shape)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
# test casting int32 image to float32
loaded_im_array = image.img_to_array(loaded_im)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
# Test that nothing is changed when target size is equal to original.
loaded_im = image.load_img(filename_rgb, target_size=(100, 100))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgb_array.shape)
self.assertAllClose(loaded_im_array, original_rgb_array)
loaded_im = image.load_img(
filename_rgba, color_mode='rgba', target_size=(100, 100))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgba_array.shape)
self.assertAllClose(loaded_im_array, original_rgba_array)
loaded_im = image.load_img(
filename_rgb, color_mode='grayscale', target_size=(100, 100))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(
loaded_im_array.shape,
(original_rgba_array.shape[0], original_rgba_array.shape[1], 1))
loaded_im = image.load_img(
filename_grayscale_8bit, color_mode='grayscale', target_size=(100, 100))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_grayscale_8bit_array.shape)
self.assertAllClose(loaded_im_array, original_grayscale_8bit_array)
loaded_im = image.load_img(
filename_grayscale_16bit,
color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = image.img_to_array(loaded_im, dtype='int16')
self.assertEqual(loaded_im_array.shape,
original_grayscale_16bit_array.shape)
self.assertAllClose(loaded_im_array, original_grayscale_16bit_array)
loaded_im = image.load_img(
filename_grayscale_32bit,
color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = image.img_to_array(loaded_im, dtype='int32')
self.assertEqual(loaded_im_array.shape,
original_grayscale_32bit_array.shape)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
# Test down-sampling with bilinear interpolation.
loaded_im = image.load_img(filename_rgb, target_size=(25, 25))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 3))
loaded_im = image.load_img(
filename_rgba, color_mode='rgba', target_size=(25, 25))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 4))
loaded_im = image.load_img(
filename_rgb, color_mode='grayscale', target_size=(25, 25))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image.load_img(
filename_grayscale_8bit, color_mode='grayscale', target_size=(25, 25))
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image.load_img(
filename_grayscale_16bit, color_mode='grayscale', target_size=(25, 25))
loaded_im_array = image.img_to_array(loaded_im, dtype='int16')
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image.load_img(
filename_grayscale_32bit, color_mode='grayscale', target_size=(25, 25))
loaded_im_array = image.img_to_array(loaded_im, dtype='int32')
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
# Test down-sampling with nearest neighbor interpolation.
loaded_im_nearest = image.load_img(
filename_rgb, target_size=(25, 25), interpolation='nearest')
loaded_im_array_nearest = image.img_to_array(loaded_im_nearest)
self.assertEqual(loaded_im_array_nearest.shape, (25, 25, 3))
self.assertTrue(np.any(loaded_im_array_nearest != loaded_im_array))
loaded_im_nearest = image.load_img(
filename_rgba,
color_mode='rgba',
target_size=(25, 25),
interpolation='nearest')
loaded_im_array_nearest = image.img_to_array(loaded_im_nearest)
self.assertEqual(loaded_im_array_nearest.shape, (25, 25, 4))
self.assertTrue(np.any(loaded_im_array_nearest != loaded_im_array))
loaded_im = image.load_img(
filename_grayscale_8bit,
color_mode='grayscale',
target_size=(25, 25),
interpolation='nearest')
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image.load_img(
filename_grayscale_16bit,
color_mode='grayscale',
target_size=(25, 25),
interpolation='nearest')
loaded_im_array = image.img_to_array(loaded_im, dtype='int16')
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image.load_img(
filename_grayscale_32bit,
color_mode='grayscale',
target_size=(25, 25),
interpolation='nearest')
loaded_im_array = image.img_to_array(loaded_im, dtype='int32')
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
# Test different path type
with open(filename_grayscale_32bit, 'rb') as f:
      path_ = io.BytesIO(f.read())  # io.BytesIO
loaded_im = image.load_img(path_, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
path_ = filename_grayscale_32bit # str
loaded_im = image.load_img(path_, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
path_ = filename_grayscale_32bit.encode() # bytes
loaded_im = image.load_img(path_, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
path_ = pathlib.Path(
os.path.join(tmpdir.full_path, 'grayscale_32bit_utils.tiff'))
loaded_im = image.load_img(path_, color_mode='grayscale')
loaded_im_array = image.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
# Check that exception is raised if interpolation not supported.
loaded_im = image.load_img(filename_rgb, interpolation='unsupported')
with self.assertRaises(ValueError):
loaded_im = image.load_img(
filename_rgb, target_size=(25, 25), interpolation='unsupported')
# Check that the aspect ratio of a square is the same
filename_red_square = os.path.join(tmpdir.full_path, 'red_square_utils.png')
arr = np.zeros((50, 100, 3), dtype=np.uint8) # rectangle image 100x50
arr[20:30, 45:55, 0] = 255 # red square 10x10
red_square_array = np.array(arr)
red_square = image.array_to_img(red_square_array, scale=False)
red_square.save(filename_red_square)
loaded_im = image.load_img(
filename_red_square, target_size=(25, 25), keep_aspect_ratio=True)
loaded_im_array = image.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 3))
red_channel_arr = loaded_im_array[:, :, 0].astype(np.bool)
square_width = np.sum(np.sum(red_channel_arr, axis=0))
square_height = np.sum(np.sum(red_channel_arr, axis=1))
aspect_ratio_result = square_width / square_height
# original square had 1:1 ratio
self.assertNear(aspect_ratio_result, 1.0, 0.01)
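    # Illustrative sketch (file name reused from the test above): keep_aspect_ratio
    # center-crops the source to the target aspect ratio before resizing, so the
    # 100x50 rectangle and the (25, 25) target keep the red square square, e.g.
    #   im = image.load_img(filename_red_square, target_size=(25, 25), keep_aspect_ratio=True)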
def test_array_to_img_and_img_to_array(self):
height, width = 10, 8
# Test the data format
# Test RGB 3D
x = np.random.random((3, height, width))
img = image.array_to_img(x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_first')
self.assertEqual(x.shape, (3, height, width))
# Test RGBA 3D
x = np.random.random((4, height, width))
img = image.array_to_img(x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_first')
self.assertEqual(x.shape, (4, height, width))
# Test 2D
x = np.random.random((1, height, width))
img = image.array_to_img(x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_first')
self.assertEqual(x.shape, (1, height, width))
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (1, height, width)),
dtype=np.int32)
img = image.array_to_img(x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_first')
self.assertEqual(x.shape, (1, height, width))
# Test tf data format
# Test RGB 3D
x = np.random.random((height, width, 3))
img = image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 3))
# Test RGBA 3D
x = np.random.random((height, width, 4))
img = image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 4))
# Test 2D
x = np.random.random((height, width, 1))
img = image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 1))
# grayscale 16-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int16)
img = image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 1))
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int32)
img = image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 1))
# Test invalid use case
with self.assertRaises(ValueError):
x = np.random.random((height, width)) # not 3D
img = image.array_to_img(x, data_format='channels_first')
with self.assertRaises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = image.array_to_img(x, data_format='channels')
with self.assertRaises(ValueError):
# neither RGB, RGBA, or gray-scale
x = np.random.random((height, width, 5))
img = image.array_to_img(x, data_format='channels_last')
with self.assertRaises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = image.img_to_array(x, data_format='channels')
with self.assertRaises(ValueError):
# neither RGB, RGBA, or gray-scale
x = np.random.random((height, width, 5, 3))
img = image.img_to_array(x, data_format='channels_last')
@test_utils.run_v2_only
class TestDirectoryIterator(test_combinations.TestCase):
def test_directory_iterator(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(
include_rgba=True, include_16bit=True, include_32bit=True)
num_classes = 2
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = 'class-{}'.format(cl)
classpaths = [
class_directory,
os.path.join(class_directory, 'subfolder-1'),
os.path.join(class_directory, 'subfolder-2'),
os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
]
for path in classpaths:
os.mkdir(os.path.join(tmpdir.full_path, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(classpaths[count % len(classpaths)],
'image-{}.png'.format(count))
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(tmpdir.full_path)
# check number of classes and images
self.assertLen(dir_iterator.class_indices, num_classes)
self.assertLen(dir_iterator.classes, count)
self.assertEqual(set(dir_iterator.filenames), set(filenames))
# Test invalid use cases
with self.assertRaises(ValueError):
generator.flow_from_directory(tmpdir.full_path, color_mode='cmyk')
with self.assertRaises(ValueError):
generator.flow_from_directory(tmpdir.full_path, class_mode='output')
def preprocessing_function(x):
      # This will fail if `x` is not a Numpy array.
# Note: This is made to enforce backward compatibility.
self.assertEqual(x.shape, (26, 26, 3))
self.assertIsInstance(x, np.ndarray)
return np.zeros_like(x)
# Test usage as Sequence
generator = image.ImageDataGenerator(
preprocessing_function=preprocessing_function)
dir_seq = generator.flow_from_directory(
tmpdir.full_path,
target_size=(26, 26),
color_mode='rgb',
batch_size=3,
class_mode='categorical')
self.assertLen(dir_seq, np.ceil(count / 3.))
x1, y1 = dir_seq[1]
self.assertEqual(x1.shape, (3, 26, 26, 3))
self.assertEqual(y1.shape, (3, num_classes))
x1, y1 = dir_seq[5]
self.assertTrue((x1 == 0).all())
with self.assertRaises(ValueError):
x1, y1 = dir_seq[14] # there are 40 images and batch size is 3
def test_directory_iterator_class_mode_input(self):
tmpdir = self.create_tempdir()
os.mkdir(os.path.join(tmpdir.full_path, 'class-1'))
all_test_images = _generate_test_images(
include_rgba=True, include_16bit=True, include_32bit=True)
# save the images in the paths
count = 0
for test_images in all_test_images:
for im in test_images:
filename = os.path.join(tmpdir, 'class-1', 'image-{}.png'.format(count))
im.save(filename)
count += 1
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(
tmpdir.full_path, class_mode='input')
batch = next(dir_iterator)
# check if input and output have the same shape
self.assertEqual(batch[0].shape, batch[1].shape)
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
@parameterized.parameters([
(0.25, 30),
(0.50, 20),
(0.75, 10),
])
def test_directory_iterator_with_validation_split(self, validation_split,
num_training):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(
include_rgba=True, include_16bit=True, include_32bit=True)
num_classes = 2
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = 'class-{}'.format(cl)
classpaths = [
class_directory,
os.path.join(class_directory, 'subfolder-1'),
os.path.join(class_directory, 'subfolder-2'),
os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
]
for path in classpaths:
os.mkdir(os.path.join(tmpdir.full_path, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(classpaths[count % len(classpaths)],
'image-{}.png'.format(count))
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator(validation_split=validation_split)
with self.assertRaises(ValueError):
generator.flow_from_directory(tmpdir.full_path, subset='foo')
train_iterator = generator.flow_from_directory(
tmpdir.full_path, subset='training')
self.assertEqual(train_iterator.samples, num_training)
valid_iterator = generator.flow_from_directory(
tmpdir.full_path, subset='validation')
self.assertEqual(valid_iterator.samples, count - num_training)
# check number of classes and images
self.assertLen(train_iterator.class_indices, num_classes)
self.assertLen(train_iterator.classes, num_training)
self.assertLen(set(train_iterator.filenames) & set(filenames), num_training)
@test_utils.run_v2_only
class TestNumpyArrayIterator(test_combinations.TestCase):
def test_numpy_array_iterator(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
image_data_generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.,
brightness_range=(1, 5),
fill_mode='nearest',
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
interpolation_order=1)
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
dsize = images.shape[0]
iterator = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=False,
save_to_dir=tmpdir.full_path,
batch_size=3)
x, y = next(iterator)
self.assertEqual(x.shape, images[:3].shape)
self.assertEqual(list(y), [0, 1, 2])
# Test with sample weights
iterator = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=False,
sample_weight=np.arange(images.shape[0]) + 1,
save_to_dir=tmpdir.full_path,
batch_size=3)
x, y, w = iterator.next()
self.assertEqual(x.shape, images[:3].shape)
self.assertEqual(list(y), [0, 1, 2])
self.assertEqual(list(w), [1, 2, 3])
# Test with `shuffle=True`
iterator = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
seed=42)
x, y = iterator.next()
self.assertEqual(x.shape, images[:3].shape)
# Check that the sequence is shuffled.
self.assertNotEqual(list(y), [0, 1, 2])
# Test without y
iterator = image.NumpyArrayIterator(
images,
None,
image_data_generator,
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3)
x = iterator.next()
self.assertIsInstance(x, np.ndarray)
self.assertEqual(x.shape, images[:3].shape)
# Test with a single miscellaneous input data array
x_misc1 = np.random.random(dsize)
iterator = image.NumpyArrayIterator((images, x_misc1),
np.arange(dsize),
image_data_generator,
shuffle=False,
batch_size=2)
for i, (x, y) in enumerate(iterator):
self.assertEqual(x[0].shape, images[:2].shape)
self.assertTrue((x[1] == x_misc1[(i * 2):((i + 1) * 2)]).all())
if i == 2:
break
# Test with two miscellaneous inputs
x_misc2 = np.random.random((dsize, 3, 3))
iterator = image.NumpyArrayIterator((images, [x_misc1, x_misc2]),
np.arange(dsize),
image_data_generator,
shuffle=False,
batch_size=2)
for i, (x, y) in enumerate(iterator):
self.assertEqual(x[0].shape, images[:2].shape)
self.assertTrue((x[1] == x_misc1[(i * 2):((i + 1) * 2)]).all())
self.assertTrue((x[2] == x_misc2[(i * 2):((i + 1) * 2)]).all())
if i == 2:
break
# Test cases with `y = None`
iterator = image.NumpyArrayIterator(
images, None, image_data_generator, batch_size=3)
x = iterator.next()
self.assertIsInstance(x, np.ndarray)
self.assertEqual(x.shape, images[:3].shape)
iterator = image.NumpyArrayIterator((images, x_misc1),
None,
image_data_generator,
batch_size=3,
shuffle=False)
x = iterator.next()
self.assertIsInstance(x, list)
self.assertEqual(x[0].shape, images[:3].shape)
self.assertTrue((x[1] == x_misc1[:3]).all())
iterator = image.NumpyArrayIterator((images, [x_misc1, x_misc2]),
None,
image_data_generator,
batch_size=3,
shuffle=False)
x = iterator.next()
self.assertIsInstance(x, list)
self.assertEqual(x[0].shape, images[:3].shape)
self.assertTrue((x[1] == x_misc1[:3]).all())
self.assertTrue((x[2] == x_misc2[:3]).all())
# Test with validation split
generator = image.ImageDataGenerator(validation_split=0.2)
iterator = image.NumpyArrayIterator(images, None, generator, batch_size=3)
x = iterator.next()
self.assertIsInstance(x, np.ndarray)
self.assertEqual(x.shape, images[:3].shape)
# Test some failure cases:
x_misc_err = np.random.random((dsize + 1, 3, 3))
with self.assertRaisesRegex(ValueError, 'All of the arrays in'):
image.NumpyArrayIterator((images, x_misc_err),
np.arange(dsize),
generator,
batch_size=3)
with self.assertRaisesRegex(ValueError,
r'`x` \(images tensor\) and `y` \(labels\)'):
image.NumpyArrayIterator((images, x_misc1),
np.arange(dsize + 1),
generator,
batch_size=3)
# Test `flow` behavior as Sequence
seq = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
generator,
shuffle=False,
save_to_dir=tmpdir.full_path,
batch_size=3)
self.assertLen(seq, images.shape[0] // 3 + 1)
x, y = seq[0]
self.assertEqual(x.shape, images[:3].shape)
self.assertEqual(list(y), [0, 1, 2])
# Test with `shuffle=True`
seq = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
generator,
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
seed=123)
x, y = seq[0]
# Check that the sequence is shuffled.
self.assertNotEqual(list(y), [0, 1, 2])
# `on_epoch_end` should reshuffle the sequence.
seq.on_epoch_end()
_, y2 = seq[0]
self.assertNotEqual(list(y), list(y2))
# test order_interpolation
labels = np.array([[2, 2, 0, 2, 2], [1, 3, 2, 3, 1], [2, 1, 0, 1, 2],
[3, 1, 0, 2, 0], [3, 1, 3, 2, 1]])
label_generator = image.ImageDataGenerator(
rotation_range=90., interpolation_order=0)
labels_gen = image.NumpyArrayIterator(
labels[np.newaxis, ..., np.newaxis], None, label_generator, seed=123)
self.assertTrue((np.unique(labels) == np.unique(next(labels_gen))).all())
@test_utils.run_v2_only
class TestDataFrameIterator(test_combinations.TestCase):
def test_dataframe_iterator(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
filepaths = []
filenames_without = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
filename_without = 'image-{}'.format(count)
filenames.append(filename)
filepaths.append(os.path.join(tmpdir.full_path, filename))
filenames_without.append(filename_without)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
df = pd.DataFrame({
'filename': filenames,
'class': [str(random.randint(0, 1)) for _ in filenames],
'filepaths': filepaths
})
# create iterator
iterator = image.DataFrameIterator(df, tmpdir.full_path)
batch = next(iterator)
self.assertLen(batch, 2)
self.assertIsInstance(batch[0], np.ndarray)
self.assertIsInstance(batch[1], np.ndarray)
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, x_col='filepaths')
df_iterator_dir = generator.flow_from_dataframe(df, tmpdir.full_path)
df_sparse_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode='sparse')
self.assertFalse(np.isnan(df_sparse_iterator.classes).any())
# check number of classes and images
self.assertLen(df_iterator.class_indices, num_classes)
self.assertLen(df_iterator.classes, count)
self.assertEqual(set(df_iterator.filenames), set(filepaths))
self.assertLen(df_iterator_dir.class_indices, num_classes)
self.assertLen(df_iterator_dir.classes, count)
self.assertEqual(set(df_iterator_dir.filenames), set(filenames))
# test without shuffle
_, batch_y = next(
generator.flow_from_dataframe(
df, tmpdir.full_path, shuffle=False, class_mode='sparse'))
self.assertTrue(
(batch_y == df['class'].astype('float')[:len(batch_y)]).all())
# Test invalid use cases
with self.assertRaises(ValueError):
generator.flow_from_dataframe(df, tmpdir.full_path, color_mode='cmyk')
with self.assertRaises(ValueError):
generator.flow_from_dataframe(df, tmpdir.full_path, class_mode='output')
with self.assertWarns(DeprecationWarning):
generator.flow_from_dataframe(df, tmpdir.full_path, has_ext=True)
with self.assertWarns(DeprecationWarning):
generator.flow_from_dataframe(df, tmpdir.full_path, has_ext=False)
def preprocessing_function(x):
      # This will fail if x is not provided as a Numpy array.
      # Note: this check is kept to enforce backward compatibility.
self.assertEqual(x.shape, (26, 26, 3))
self.assertIsInstance(x, np.ndarray)
return np.zeros_like(x)
# Test usage as Sequence
generator = image.ImageDataGenerator(
preprocessing_function=preprocessing_function)
dir_seq = generator.flow_from_dataframe(
df,
tmpdir.full_path,
target_size=(26, 26),
color_mode='rgb',
batch_size=3,
class_mode='categorical')
self.assertLen(dir_seq, np.ceil(count / 3))
x1, y1 = dir_seq[1]
self.assertEqual(x1.shape, (3, 26, 26, 3))
self.assertEqual(y1.shape, (3, num_classes))
x1, y1 = dir_seq[5]
self.assertTrue((x1 == 0).all())
with self.assertRaises(ValueError):
x1, y1 = dir_seq[9]
def test_dataframe_iterator_validate_filenames(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({'filename': filenames + ['test.jpp', 'test.jpg']})
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode='input')
self.assertLen(df_iterator.filenames, len(df['filename']) - 2)
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode='input', validate_filenames=False)
self.assertLen(df_iterator.filenames, len(df['filename']))
def test_dataframe_iterator_sample_weights(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({'filename': filenames})
df['weight'] = ([2, 5] * len(df))[:len(df)]
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df,
tmpdir.full_path,
x_col='filename',
y_col=None,
shuffle=False,
batch_size=5,
weight_col='weight',
class_mode='input')
batch = next(df_iterator)
self.assertLen(batch, 3) # (x, y, weights)
# check if input and output have the same shape and they're the same
self.assertEqual(batch[0].all(), batch[1].all())
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
self.assertAllEqual(np.array([2, 5, 2, 5, 2]), batch[2])
# fail
df['weight'] = (['2', '5'] * len(df))[:len(df)]
with self.assertRaises(TypeError):
image.ImageDataGenerator().flow_from_dataframe(
df, weight_col='weight', class_mode='input')
def test_dataframe_iterator_class_mode_input(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({'filename': filenames})
generator = image.ImageDataGenerator()
df_autoencoder_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, x_col='filename', y_col=None, class_mode='input')
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
self.assertAllClose(batch[0], batch[1])
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
df_autoencoder_iterator = generator.flow_from_dataframe(
df,
tmpdir.full_path,
x_col='filename',
y_col='class',
class_mode='input')
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
self.assertEqual(batch[0].all(), batch[1].all())
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
def test_dataframe_iterator_class_mode_categorical_multi_label(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
label_opt = ['a', 'b', ['a'], ['b'], ['a', 'b'], ['b', 'a']]
df = pd.DataFrame({
'filename': filenames,
'class': [random.choice(label_opt) for _ in filenames[:-2]] +
['b', 'a']
})
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, tmpdir.full_path)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (len(batch_x), 2))
for labels in batch_y:
self.assertTrue(all(label in {0, 1} for label in labels))
# on first 3 batches
df = pd.DataFrame({
'filename':
filenames,
'class': [['b', 'a']] + ['b'] + [['c']] +
[random.choice(label_opt) for _ in filenames[:-3]]
})
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, shuffle=False)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (len(batch_x), 3))
for labels in batch_y:
self.assertTrue(all(label in {0, 1} for label in labels))
self.assertTrue((batch_y[0] == np.array([1, 1, 0])).all())
self.assertTrue((batch_y[1] == np.array([0, 1, 0])).all())
self.assertTrue((batch_y[2] == np.array([0, 0, 1])).all())
def test_dataframe_iterator_class_mode_multi_output(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
    # case where both outputs are a single number
df = pd.DataFrame({
'filename': filenames
}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames)))
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=['output_0', 'output_1'],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode='multi_output')
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, list)
self.assertLen(batch_y, 2)
self.assertAllEqual(batch_y[0], np.array(df['output_0'].tolist()[:3]))
self.assertAllEqual(batch_y[1], np.array(df['output_1'].tolist()[:3]))
# if one of the outputs is a 1D array
df['output_1'] = [
np.random.uniform(size=(2, 2, 1)).flatten() for _ in range(len(df))
]
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=['output_0', 'output_1'],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode='multi_output')
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, list)
self.assertLen(batch_y, 2)
self.assertAllEqual(batch_y[0], np.array(df['output_0'].tolist()[:3]))
self.assertAllEqual(batch_y[1], np.array(df['output_1'].tolist()[:3]))
# if one of the outputs is a 2D array
df['output_1'] = [np.random.uniform(size=(2, 2, 1)) for _ in range(len(df))]
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=['output_0', 'output_1'],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode='multi_output')
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, list)
self.assertLen(batch_y, 2)
self.assertAllEqual(batch_y[0], np.array(df['output_0'].tolist()[:3]))
self.assertAllEqual(batch_y[1], np.array(df['output_1'].tolist()[:3]))
# fail if single column
with self.assertRaises(TypeError):
image.ImageDataGenerator().flow_from_dataframe(
df,
y_col='output_0',
directory=tmpdir.full_path,
class_mode='multi_output')
def test_dataframe_iterator_class_mode_raw(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
# case for 1D output
df = pd.DataFrame({
'filename': filenames
}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames)))
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col='output_0',
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode='raw')
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3,))
self.assertAllEqual(batch_y, df['output_0'].values[:3])
# case with a 2D output
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=['output_0', 'output_1'],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode='raw')
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3, 2))
self.assertAllEqual(batch_y, df[['output_0', 'output_1']].values[:3])
@parameterized.parameters([
(0.25, 18),
(0.50, 12),
(0.75, 6),
])
def test_dataframe_iterator_with_validation_split(self, validation_split,
num_training):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
filenames_without = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
filename_without = 'image-{}'.format(count)
filenames.append(filename)
filenames_without.append(filename_without)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
df = pd.DataFrame({
'filename': filenames,
'class': [str(random.randint(0, 1)) for _ in filenames]
})
# create iterator
generator = image.ImageDataGenerator(validation_split=validation_split)
df_sparse_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode='sparse')
if np.isnan(next(df_sparse_iterator)[:][1]).any():
raise ValueError('Invalid values.')
with self.assertRaises(ValueError):
generator.flow_from_dataframe(df, tmpdir.full_path, subset='foo')
train_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, subset='training')
self.assertEqual(train_iterator.samples, num_training)
valid_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, subset='validation')
self.assertEqual(valid_iterator.samples, count - num_training)
# check number of classes and images
self.assertLen(train_iterator.class_indices, num_classes)
self.assertLen(train_iterator.classes, num_training)
self.assertLen(set(train_iterator.filenames) & set(filenames), num_training)
def test_dataframe_iterator_with_custom_indexed_dataframe(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# create dataframes
classes = np.random.randint(num_classes, size=len(filenames))
classes = [str(c) for c in classes]
df = pd.DataFrame({'filename': filenames, 'class': classes})
df2 = pd.DataFrame({
'filename': filenames,
'class': classes
},
index=np.arange(1,
len(filenames) + 1))
df3 = pd.DataFrame({
'filename': filenames,
'class': classes
},
index=filenames)
# create iterators
seed = 1
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, tmpdir.full_path, seed=seed)
df2_iterator = generator.flow_from_dataframe(
df2, tmpdir.full_path, seed=seed)
df3_iterator = generator.flow_from_dataframe(
df3, tmpdir.full_path, seed=seed)
# Test all iterators return same pairs of arrays
for _ in range(len(filenames)):
a1, c1 = next(df_iterator)
a2, c2 = next(df2_iterator)
a3, c3 = next(df3_iterator)
self.assertAllEqual(a1, a2)
self.assertAllEqual(a1, a3)
self.assertAllEqual(c1, c2)
self.assertAllEqual(c1, c3)
def test_dataframe_iterator_n(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the tmpdir
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{}.png'.format(count)
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# exclude first two items
n_files = len(filenames)
input_filenames = filenames[2:]
# create dataframes
classes = np.random.randint(2, size=len(input_filenames))
classes = [str(c) for c in classes]
df = pd.DataFrame({'filename': input_filenames})
df2 = pd.DataFrame({'filename': input_filenames, 'class': classes})
# create iterators
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode=None)
df2_iterator = generator.flow_from_dataframe(
df2, tmpdir.full_path, class_mode='binary')
# Test the number of items in iterators
self.assertEqual(df_iterator.n, n_files - 2)
self.assertEqual(df2_iterator.n, n_files - 2)
def test_dataframe_iterator_absolute_path(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the tmpdir
count = 0
file_paths = []
for test_images in all_test_images:
for im in test_images:
filename = 'image-{:0>5}.png'.format(count)
file_path = os.path.join(tmpdir.full_path, filename)
file_paths.append(file_path)
im.save(file_path)
count += 1
# prepare an image with a forbidden extension.
file_path_fbd = os.path.join(tmpdir.full_path, 'image-forbid.fbd')
shutil.copy(file_path, file_path_fbd)
# create dataframes
classes = np.random.randint(2, size=len(file_paths))
classes = [str(c) for c in classes]
df = pd.DataFrame({'filename': file_paths})
    df2 = pd.DataFrame({'filename': file_paths, 'class': classes})
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 19:59:33 2018
@author: rhou
"""
import warnings
warnings.filterwarnings("ignore")
import argparse
import pandas as pd
import numpy as np
import os, sys
from scipy.stats import spearmanr, pearsonr
import glob
from scipy import io
import multiprocessing
from functools import partial
def SPMAnno(refDB, keepZeros, testMethod, testCol):
firstLayerHeader = [testCol.columns[0], testCol.columns[0]]
#find common genes between test data and ref data
testRows = set(testCol.index)
refRows = set(refDB.index)
if keepZeros:
commonRows = list(refRows)
else:
commonRows = list(refRows.intersection(testRows))
commonRowsLen = len(commonRows)
thirdLayerHeader = [commonRowsLen, commonRowsLen]
#only keep non-zero genes
testCol = testCol.loc[commonRows, ].fillna(0.0)
testrefDB = refDB.loc[commonRows, ].fillna(0.0)
if testMethod == 'Spearman':
spr_correlation = testrefDB.apply(lambda col: spearmanr(col, testCol)[0], axis=0)
spr_correlation = spr_correlation.to_frame().fillna(0).round(10).reset_index()
spr_correlation.columns = ['sample name', 'Spearman correlation coefficient']
secondLayerHeader = ['sample name', 'Spearman correlation coefficient']
spr_correlation = spr_correlation.sort_values(by=['Spearman correlation coefficient'], ascending=False)
spr_correlation = spr_correlation.reset_index(drop=True)
return (firstLayerHeader, thirdLayerHeader, secondLayerHeader, spr_correlation, testCol.columns[0], spr_correlation.iloc[0,0], spr_correlation.iloc[0,1])
elif testMethod == 'Pearson':
        # pearsonr expects 1-D inputs, so pass each reference column and the single test profile as Series
        pes_correlation = testrefDB.apply(lambda col: pearsonr(col, testCol.iloc[:, 0])[0], axis=0)
        pes_correlation = pes_correlation.to_frame().fillna(0).round(10).reset_index()
pes_correlation.columns = ['sample name', 'Pearson correlation coefficient']
secondLayerHeader = ['sample name', 'Pearson correlation coefficient']
pes_correlation = pes_correlation.sort_values(by=['Pearson correlation coefficient'], ascending=False)
pes_correlation = pes_correlation.reset_index(drop=True)
return (firstLayerHeader, thirdLayerHeader, secondLayerHeader, pes_correlation, testCol.columns[0], pes_correlation.iloc[0,0], pes_correlation.iloc[0,1])
#transfer given species gene symbols to hids
def TransferToHids(refDS, species, geneList):
hidCol = 0 # universal id for homology genes
taxidCol = 1 # species id
geneSymbolCol = 3 # for jordan's data, it only has gene symbol
homoDF = pd.read_csv(os.path.join(refDS, 'homologene.data'), sep='\t', index_col=None, header=None)
# reduce the list to genes of the given species
speciesDF = homoDF.ix[homoDF[taxidCol] == int(species),].set_index(geneSymbolCol)
geneDF = speciesDF.loc[speciesDF.index.isin(geneList)]
notnaGenes = geneDF[geneDF[hidCol].notnull()]
notnaGenes = notnaGenes.ix[~notnaGenes[hidCol].duplicated(keep='first'),]
notnaGenesSymbols = list(notnaGenes.index)
notnaGenesHIDs = list(notnaGenes.ix[:, hidCol])
notnaGenesHIDs = [int(i) for i in notnaGenesHIDs]
return notnaGenesSymbols, notnaGenesHIDs
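# Usage sketch for TransferToHids (the path and taxid below are illustrative only):
# with refDS pointing at a folder that contains NCBI's homologene.data, e.g.
#   symbols, hids = TransferToHids('refDB', '10090', ['Trp53', 'Brca1'])
# returns the symbols that were found in HomoloGene together with their integer
# homology-group ids (HIDs) in matching order.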
def AnnSCData(testType, em, refDS, refType, refTypeName, keepZeros, testMethod, coreNum, savefolder):
#if across species need to replace symbols to HIDs
if testType == refType:
#load reference database
refDB = pd.read_csv(os.path.join(refDS, '%s_symbol.csv' % refType), index_col=0, header=0)
else:
#load reference database
refDB = pd.read_csv(os.path.join(refDS, '%s_HID.csv' % refType), index_col=0, header=0)
#for different species, transfer symbols to hids
geneList = list(em.index)
geneList, hidList = TransferToHids(refDS, testType, geneList)
em = em.ix[~em.index.duplicated(keep='first'),]
em = em.ix[geneList, ]
hidList = [str(i) for i in hidList]
em.index = hidList
hidList = [str(i) for i in refDB.index]
refDB.index = hidList
#remove duplicate indices
refDB = refDB.ix[~refDB.index.duplicated(keep='first'),]
print('reference dataset shape: %s genes, %s samples' % refDB.shape)
em = em.ix[~em.index.duplicated(keep='first'),]
#split expression matrix to single-cell expression profiles
eps = np.split(em, len(em.columns), axis=1)
#annotate single-cell expression profiles in parallel
p = multiprocessing.Pool(coreNum)
func = partial(SPMAnno, refDB, keepZeros, testMethod)
resultList = p.map(func, eps)
p.close()
p.join()
#generate final sample annotation expression matrix
firstLayerHeader = [item for i in resultList for item in i[0]]
secondLayerHeader = [item for i in resultList for item in i[1]]
thirdLayerHeader = [item for i in resultList for item in i[2]]
merged = pd.concat([i[3] for i in resultList], axis=1)
arrays = [firstLayerHeader, secondLayerHeader, thirdLayerHeader]
tuples = list(zip(*arrays))
headers = pd.MultiIndex.from_tuples(tuples, names=['identifier', 'tested genes', 'annotation'])
merged.columns = headers
#prepare folder to save annotation result
if not os.path.exists(savefolder):
os.mkdir(savefolder)
rstFolder = 'annotation_result'
if keepZeros:
rstFolder = rstFolder + '_keep_all_genes'
else:
rstFolder = rstFolder + '_keep_expressed_genes'
savefolder = os.path.join(savefolder, rstFolder)
if not os.path.exists(savefolder):
os.mkdir(savefolder)
#save file
print('##########saving annotation results in the folder: %s' % savefolder)
if len(merged.columns) > 8190:
colNum = len(merged.columns)
parts = int(colNum / 8000)
for partIdx in range(parts):
subMerged = merged.iloc[:,8000*partIdx:8000*(partIdx+1)]
subMerged.to_excel(os.path.join(savefolder, refTypeName+"_%s_Part%04d.xlsx" % (testMethod, partIdx+1)))
subMerged = merged.iloc[:,8000*parts:]
subMerged.to_excel(os.path.join(savefolder, refTypeName+"_%s_Part%04d.xlsx" % (testMethod, parts+1)))
else:
merged.to_excel(os.path.join(savefolder, refTypeName+"_%s.xlsx" % testMethod))
#save top ann result
topAnn = pd.DataFrame({'cell':[i[4] for i in resultList], 'cell type':[''] * len(resultList), 'top sample':[i[5] for i in resultList], 'top correlation score':[i[6] for i in resultList]})
mapData = pd.read_csv(os.path.join(refDS, '%s_map.csv' % refType), index_col=0, header=0)
for idx in topAnn.index:
topAnn.ix[idx, 'cell type'] = mapData.ix[topAnn.ix[idx, 'top sample'], 'cell type']
saveNameP = os.path.join(savefolder, refTypeName+"_%s_top_ann.csv" % (testMethod))
topAnn.to_csv(saveNameP, index=False, columns = ['cell', 'cell type', 'top sample', 'top correlation score'])
print('##########DONE!')
def SortAnno(testItem):
oldcols = testItem.columns
cell = oldcols.get_level_values(0)[0]
testItem.columns = ["name", "coefficient"]
testItem = testItem.sort_values(by=['coefficient'], ascending=False)
topAnn = testItem.iloc[0,0]
topCoff = testItem.iloc[0,1]
testItem.columns = oldcols
testItem = testItem.reset_index(drop=True)
return (testItem, cell, topAnn, topCoff)
#start to annotate test dataset
def main(testType, testFormat, testDS, testGenes, refDS, refTypeList, keepZeros, testMethodList, coreNum):
#load test data
print('##########loading test data')
if testFormat == '10x':
fileItem = glob.glob(os.path.join(testDS, "matrix.mtx"))[0]
em = io.mmread(fileItem)
em = em.tocsr().toarray()
        if os.path.exists(os.path.join(testDS, 'genes.tsv')):
row = pd.read_table(fileItem[:-10]+"genes.tsv", header=None, index_col=None)
else:
row = pd.read_table(fileItem[:-10]+"features.tsv", header=None, index_col=None)
col = pd.read_table(fileItem[:-10]+"barcodes.tsv", header=None, index_col=None)
        em = pd.DataFrame(em, index=row.T.values[1], columns=col.T.values[0])
import time
import json
import copy
import os
import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms, utils, models
from PIL import Image
from pathlib import Path
from collections import OrderedDict
from utils.utils import rot, get_bbox, imshow
from data_loaders import ROB535Dataset
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
data_transforms = {
'train': transforms.Compose([
transforms.RandomRotation(30),
transforms.Resize((300,600)),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'valid': transforms.Compose([
transforms.Resize((300,600)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize((300,600)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
]),
}
# phase='train' in order to first load the official model and then make adjustments to it
dataset = ROB535Dataset(data_dir='data/rob535-fall-2019-task-1-image-classification', phase='train', transforms=data_transforms['test'])
dataloader = DataLoader(dataset, batch_size = 32, shuffle=False)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models.resnext50_32x4d(pretrained=True)
num_in_features = 2048
# Freezing parameters
for param in model.parameters():
    param.requires_grad = False
# Create Custom Classifier
hidden_layers = [1000]
new_classifier = torch.nn.Sequential()
new_classifier.add_module('fc0', torch.nn.Linear(num_in_features, hidden_layers[0]))
new_classifier.add_module('relu0', torch.nn.ReLU())
new_classifier.add_module('drop0', torch.nn.Dropout(.6))
new_classifier.add_module('output', torch.nn.Linear(hidden_layers[0], 4))
# Defining model hyperparameters
model.fc = new_classifier
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=3, threshold = 0.9)
if not train_on_gpu:
model.load_state_dict(torch.load('models/team10_trained_resnext50_final.pt', map_location=torch.device('cpu')))
else:
model.load_state_dict(torch.load('models/team10_trained_resnext50_final.pt'))
model.to(device)
from torchsummary import summary
if device == 'cpu':
summary(model.cpu(), (3,224,224))
elif device == 'cuda':
summary(model.cuda(), (3,224,224))
test_dir = 'data/rob535-fall-2019-task-1-image-classification'
with torch.no_grad():
print("Evaluating test data: ")
model.eval()
test_dataset = ROB535Dataset(data_dir=test_dir, phase='test', transforms=data_transforms['test'])
test_dataloader = DataLoader(test_dataset, batch_size = 64, shuffle=False, num_workers=2)
image_names = []
pred = []
for index in test_dataloader.dataset.imgs:
tmp = index.replace('data/rob535-fall-2019-task-1-image-classification/data-2019/test/','')
tmp = tmp.replace('_image.jpg', '')
image_names.append(Path(tmp))
results = []
file_names = []
predicted_car = []
predicted_class = []
for inputs in test_dataloader:
inputs = inputs.to(device)
outputs = model(inputs)
_, pred = torch.max(outputs, 1)
        for i in range(len(inputs)):
            # use a running global index so file names stay aligned with predictions across batches
            file_names.append(image_names[len(predicted_car)])
            predicted_car.append(int(pred[i]))
results.append((file_names, predicted_car))
# Create new dataframe
    df = pd.DataFrame({'guid/image': image_names, 'label': results[0][1]})
import csv
import pandas as pd
import numpy as np
######=================================================########
###### Segment A.1 ########
######=================================================########
SimDays = 365
SimHours = SimDays * 24
HorizonHours = 24 ##planning horizon (e.g., 24, 48, 72 hours etc.)
TransLoss = 0.075 ##transmission loss as a percent of generation
n1criterion = 0.75 ##maximum line-usage as a percent of line-capacity
res_margin = 0.15 ##minimum reserve as a percent of system demand
spin_margin = 0.50 ##minimum spinning reserve as a percent of total reserve
data_name = 'pownet_data_camb_2016'
######=================================================########
###### Segment A.2 ########
######=================================================########
#read parameters for dispatchable resources (coal/gas/oil/biomass generators, imports)
df_gen = pd.read_csv('data_camb_genparams.csv',header=0)
##hourly ts of dispatchable hydropower at each domestic dam
df_hydro = pd.read_csv('data_camb_hydro_2016.csv',header=0)
##hourly ts of dispatchable hydropower at each import dam
df_hydro_import = pd.read_csv('data_camb_hydro_import_2016.csv',header=0)
####hourly ts of dispatchable solar-power at each plant
##df_solar = pd.read_csv('data_solar.csv',header=0)
##
####hourly ts of dispatchable wind-power at each plant
##df_wind = pd.read_csv('data_wind.csv',header=0)
##hourly ts of load at substation-level
df_load = pd.read_csv('data_camb_load_2016.csv',header=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 23:24:11 2021
@author: rayin
"""
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import re
import random
from collections import Counter
from pprint import pprint
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
case_gene_update = pd.read_csv("data/processed/variant_clean.csv", index_col=0)
aa_variant = list(case_gene_update['\\12_Candidate variants\\09 Protein\\'])
#pd.DataFrame(aa_variant).to_csv('aa_variant.csv')
#aa_variant_update = pd.read_csv("data/processed/aa_variant_update.csv", index_col=0)
#aa_variant_update = list(aa_variant_update['\\12_Candidate variants\\09 Protein\\'])
amino_acid = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'TER': 'X'}
aa_3 = []
aa_1 = []
for i in amino_acid.keys():
aa_3.append(i)
aa_1.append(amino_acid[i])
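# Rewrite the protein-change strings from three-letter to one-letter residue
# codes; e.g. an (illustrative) HGVS-style value 'p.(Gly12Asp)' becomes
# 'P.(G12D)' after the upper-casing below.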
for i in range(0, len(aa_variant)):
for j in range(len(aa_3)):
if isinstance(aa_variant[i], float):
break
aa_variant[i] = str(aa_variant[i].upper())
if aa_3[j] in aa_variant[i]:
aa_variant[i] = aa_variant[i].replace(aa_3[j], aa_1[j])
#extracting aa properties from aaindex
#https://www.genome.jp/aaindex/
aa = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
#RADA880108
polarity = [-0.06, -0.84, -0.48, -0.80, 1.36, -0.73, -0.77, -0.41, 0.49, 1.31, 1.21, -1.18, 1.27, 1.27, 0.0, -0.50, -0.27, 0.88, 0.33, 1.09]
aa_polarity = pd.concat([pd.Series(aa), pd.Series(polarity)], axis=1)
aa_polarity = aa_polarity.rename(columns={0:'amino_acid', 1: 'polarity_value'})
#KLEP840101
net_charge = [0, 1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
aa_net_charge = pd.concat([pd.Series(aa), pd.Series(net_charge)], axis=1)
aa_net_charge = aa_net_charge.rename(columns={0:'amino_acid', 1: 'net_charge_value'})
#CIDH920103
hydrophobicity = [0.36, -0.52, -0.90, -1.09, 0.70, -1.05, -0.83, -0.82, 0.16, 2.17, 1.18, -0.56, 1.21, 1.01, -0.06, -0.60, -1.20, 1.31, 1.05, 1.21]
aa_hydrophobicity = pd.concat([pd.Series(aa), pd.Series(hydrophobicity)], axis=1)
aa_hydrophobicity = aa_hydrophobicity.rename(columns={0:'amino_acid', 1: 'hydrophobicity_value'})
#FAUJ880103 -- Normalized van der Waals volume
normalized_vdw = [1.00, 6.13, 2.95, 2.78, 2.43, 3.95, 3.78, 0.00, 4.66, 4.00, 4.00, 4.77, 4.43, 5.89, 2.72, 1.60, 2.60, 8.08, 6.47, 3.00]
aa_normalized_vdw = pd.concat([pd.Series(aa), pd.Series(normalized_vdw)], axis=1)
aa_normalized_vdw = aa_normalized_vdw.rename(columns={0:'amino_acid', 1: 'normalized_vdw_value'})
#CHAM820101
polarizability = [0.046, 0.291, 0.134, 0.105, 0.128, 0.180, 0.151, 0.000, 0.230, 0.186, 0.186, 0.219, 0.221, 0.290, 0.131, 0.062, 0.108, 0.409, 0.298, 0.140]
aa_polarizability = pd.concat([pd.Series(aa), pd.Series(polarizability)], axis=1)
aa_polarizability = aa_polarizability.rename(columns={0:'amino_acid', 1: 'polarizability_value'})
#JOND750102
pK_COOH = [2.34, 1.18, 2.02, 2.01, 1.65, 2.17, 2.19, 2.34, 1.82, 2.36, 2.36, 2.18, 2.28, 1.83, 1.99, 2.21, 2.10, 2.38, 2.20, 2.32]
aa_pK_COOH = pd.concat([pd.Series(aa), pd.Series(pK_COOH)], axis=1)
aa_pK_COOH = aa_pK_COOH.rename(columns={0:'amino_acid', 1: 'pK_COOH_value'})
#FASG760104
pK_NH2 = [9.69, 8.99, 8.80, 9.60, 8.35, 9.13, 9.67, 9.78, 9.17, 9.68, 9.60, 9.18, 9.21, 9.18, 10.64, 9.21, 9.10, 9.44, 9.11, 9.62]
aa_pK_NH2 = pd.concat([pd.Series(aa), pd.Series(pK_NH2)], axis=1)
aa_pK_NH2 = aa_pK_NH2.rename(columns={0:'amino_acid', 1: 'pK_NH2_value'})
#ROBB790101 Hydration free energy
hydration = [-1.0, 0.3, -0.7, -1.2, 2.1, -0.1, -0.7, 0.3, 1.1, 4.0, 2.0, -0.9, 1.8, 2.8, 0.4, -1.2, -0.5, 3.0, 2.1, 1.4]
aa_hydration = pd.concat([pd.Series(aa), pd.Series(hydration)], axis=1)
aa_hydration = aa_hydration.rename(columns={0:'amino_acid', 1: 'hydration_value'})
#FASG760101
molecular_weight = [89.09, 174.20, 132.12, 133.10, 121.15, 146.15, 147.13, 75.07, 155.16, 131.17, 131.17, 146.19, 149.21, 165.19,
115.13, 105.09, 119.12, 204.24, 181.19, 117.15]
aa_molecular_weight = pd.concat([pd.Series(aa), pd.Series(molecular_weight)], axis=1)
aa_molecular_weight = aa_molecular_weight.rename(columns={0:'amino_acid', 1: 'molecular_weight_value'})
#FASG760103
optical_rotation = [1.80, 12.50, -5.60, 5.05, -16.50, 6.30, 12.00, 0.00, -38.50, 12.40, -11.00, 14.60, -10.00, -34.50, -86.20,
-7.50, -28.00, -33.70, -10.00, 5.63]
aa_optical_rotation = pd.concat([pd.Series(aa), pd.Series(optical_rotation)], axis=1)
aa_optical_rotation = aa_optical_rotation.rename(columns={0:'amino_acid', 1: 'optical_rotation_value'})
#secondary structure #LEVJ860101
#https://pybiomed.readthedocs.io/en/latest/_modules/CTD.html#CalculateCompositionSolventAccessibility
#SecondaryStr = {'1': 'EALMQKRH', '2': 'VIYCWFT', '3': 'GNPSD'}
# '1'stand for Helix; '2'stand for Strand, '3' stand for coil
secondary_structure = [1, 1, 3, 3, 2, 1, 1, 3, 1, 2, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2]
aa_secondary_structure = pd.concat([pd.Series(aa), pd.Series(secondary_structure)], axis=1)
aa_secondary_structure = aa_secondary_structure.rename(columns={0:'amino_acid', 1: 'secondary_structure_value'})
#_SolventAccessibility = {'-1': 'ALFCGIVW', '1': 'RKQEND', '0': 'MPSTHY'}
# '-1'stand for Buried; '1'stand for Exposed, '0' stand for Intermediate
solvent_accessibility = [-1, 1, 1, 1, -1, 1, 1, -1, 0, -1, -1, 1, 0, -1, 0, 0, 0, -1, 0, -1]
aa_solvent_accessibility = pd.concat([pd.Series(aa), pd.Series(solvent_accessibility)], axis=1)
aa_solvent_accessibility = aa_solvent_accessibility.rename(columns={0:'amino_acid', 1: 'solvent_accessibility_value'})
############################################################################################################################################
#CHAM820102 Free energy of solution in water
free_energy_solution = [-0.368, -1.03, 0.0, 2.06, 4.53, 0.731, 1.77, -0.525, 0.0, 0.791, 1.07, 0.0, 0.656, 1.06, -2.24, -0.524, 0.0, 1.60, 4.91, 0.401]
aa_free_energy_solution = pd.concat([pd.Series(aa), pd.Series(free_energy_solution)], axis=1)
import pandas as pd
import numpy as np
import sklearn.feature_selection
import sklearn.preprocessing
import sklearn.model_selection
import mlr
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import statistics
# sorting variables
def sort_by_feature_name(df):
df =df.T
a = []
for i in df.T.columns:
a.append(len(i))
df["len"] = a
df_sorted = df.sort_values(["len"])
df_sorted = df_sorted.drop(["len"],axis=1)
return df_sorted.T
# Remove feature correlations, using Pearson correlation, based on the variable threshold
def remove_correlation(dataset, threshold):
col_corr = set() # Set of all the names of deleted columns
corr_matrix = dataset.corr().abs()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] >= threshold:
colname = corr_matrix.columns[i] # getting the name of column
col_corr.add(colname)
if colname in dataset.columns:
del dataset[colname] # deleting the column from the dataset
return dataset
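# Minimal sketch of remove_correlation on synthetic data (the column names,
# values and threshold below are illustrative only):
def _remove_correlation_example():
    demo = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0],
                         'b': [2.0, 4.0, 6.0, 8.0],  # perfectly correlated with 'a'
                         'c': [4.0, 1.0, 3.0, 2.0]})
    # 'b' is dropped because |corr(a, b)| = 1.0 >= 0.9; 'a' and 'c' are kept
    return remove_correlation(demo, 0.9)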
# SEP is the standard error of prediction (test set). SEE is the error for training
def sep(yt,yp):
return np.sqrt(mean_squared_error(yt, yp))
def run_MLREM(df2, name, dependent_variable, up_to_beta=200, screen_variance=False):
df=df2.copy()
# Separating independent and dependent variables x and y
y = df[dependent_variable].to_numpy().reshape(-1,1)
x = df.drop(dependent_variable,axis=1)
x_sorted=sort_by_feature_name(x)
x_pastvar = x_sorted.copy()
if screen_variance:
selector = sklearn.feature_selection.VarianceThreshold(threshold=0.01)
selector.fit(x_sorted)
x_pastvar=x_sorted.T[selector.get_support()].T
x_remcorr = remove_correlation(x_pastvar,0.9)
y_scaller = sklearn.preprocessing.StandardScaler()
x_scaller = sklearn.preprocessing.StandardScaler()
ys_scalled = y_scaller.fit_transform(y)
xs_scalled = x_scaller.fit_transform(x_remcorr)
ind = x_remcorr.columns
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(xs_scalled, ys_scalled, test_size=0.3)
df_X_test = pd.DataFrame(X_test, columns=ind) # this is to be able to calculate SEP for each iteration of beta
df_X_train = pd.DataFrame(X_train, columns=ind)
sepai = []
betai = []
indexai = []
weights = []
pvalues = []
# Beta optimisation
for i in range(1,up_to_beta):
beta = 0.1 * i
betai.append(beta)
w, indice, pv = mlr.train(X_train, y_train, ind, beta=beta)
indexai.append(indice)
weights.append(w)
pvalues.append(pv)
X_test2 = df_X_test[indice[1:]]
X_train2 = df_X_train[indice[1:]]
# RMSE calculation - test set
yp = np.dot(X_test2,w[1:])
yp = y_scaller.inverse_transform(yp)
yt = y_scaller.inverse_transform(y_test)
sepai.append(sep(yp,yt))
# RMSE calculation - training set
yp = np.dot(X_train2,w[1:])
yp = y_scaller.inverse_transform(yp)
yt = y_scaller.inverse_transform(y_train)
#print(beta, ';', sep(yp,yt),';', sepai[-1])
# Extracting best results obtained in the previous loop based on the minimum error of prediction
best_beta_indx = sepai.index(np.array(sepai).min())
print('Best beta =', betai[best_beta_indx])
# weights for each remaining feature after correlation has been performed
df_features = pd.DataFrame(weights[best_beta_indx],index=indexai[best_beta_indx])
df_features.columns = ["weights"]
# p value calculation for the regression
df_pvalues = pd.DataFrame(pvalues[best_beta_indx],index=indexai[best_beta_indx])
df_pvalues.columns = ["pvalues"]
# Indexes of the features selected for regression after correlation study is performed
saved_idx_MLR = df_features.index.tolist()
saved_idx_MLR.append(dependent_variable)
# Intercept is a feature used in the MLR function
if 'Intercept' in saved_idx_MLR:
df_X_train['Intercept'] = 1
df_X_test['Intercept'] = 1
df['Intercept'] = 1
mlr_name = "DataForAnalysis_features_not_correlated"+ name +".csv"
df[saved_idx_MLR].to_csv(mlr_name,sep=",",header=True)
# stats for the training set
yp = np.dot(df_X_train[indexai[best_beta_indx]],weights[best_beta_indx])
yp = y_scaller.inverse_transform(yp)
y = y_scaller.inverse_transform(y_train)
y_train = y
# stats calculation
r2_train = r2_score(y,yp)
rmse_train = np.sqrt(mean_squared_error(yp, y))
print("\n\n R2 train MLREM: %f" % (r2_train))
print("\n\n RMSE train MLREM: %f" % (rmse_train))
# stats for the test set
yp = np.dot(df_X_test[indexai[best_beta_indx]],weights[best_beta_indx])
yp = y_scaller.inverse_transform(yp)
y = y_scaller.inverse_transform(y_test)
y_test = y
r2_test = r2_score(y,yp)#stats.pearsonr(yp,y)[0]**2
rmse_test = np.sqrt(mean_squared_error(yp, y))
print("\n\n RMSE test MLREM: %f" % (rmse_test))
return (rmse_train,rmse_test,r2_train,r2_test)
# Add here the csv file to be analysed and the name of the dependent variable
R2_train = list()
R2_test = list()
RMSE_train = list()
RMSE_test = list()
data = pd.read_excel('LigandsSubstrateBoronFragmentDescriptors_LASSO.xlsx')
import pandas as pd
import datetime
import dash_html_components as html
from ventilators.utils import us_map, us_timeline, get_no_model_visual, get_model_visual
def build_transfers_map(chosen_model,chosen_date,p1,p2,p3):
if chosen_model == "Washington IHME":
df_map = pd.read_csv('data/predicted_ventilator/state_supplies_table-ihme.csv', sep=",", parse_dates = ['Date'])
else:
        df_map = pd.read_csv('data/predicted_ventilator/state_supplies_table-ode.csv', sep=",", parse_dates = ['Date'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 17:28:04 2020
@author: shlomi
"""
from PW_paths import work_yuval
from matplotlib import rcParams
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from PW_paths import savefig_path
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from PW_stations import produce_geo_gnss_solved_stations
tela_results_path = work_yuval / 'GNSS_stations/tela/rinex/30hr/results'
tela_solutions = work_yuval / 'GNSS_stations/tela/gipsyx_solutions'
sound_path = work_yuval / 'sounding'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
dem_path = work_yuval / 'AW3D30'
era5_path = work_yuval / 'ERA5'
hydro_path = work_yuval / 'hydro'
ceil_path = work_yuval / 'ceilometers'
aero_path = work_yuval / 'AERONET'
climate_path = work_yuval / 'climate'
df_gnss = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
st_order_climate = [x for x in df_gnss.dropna().sort_values(
['groups_climate', 'lat', 'lon'], ascending=[1, 0, 0]).index]
rc = {
'font.family': 'serif',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large'}
for key, val in rc.items():
rcParams[key] = val
# sns.set(rc=rc, style='white')
seasonal_colors = {'DJF': 'tab:blue',
'SON': 'tab:red',
'JJA': 'tab:green',
'MAM': 'tab:orange',
'Annual': 'tab:purple'}
def get_twin(ax, axis):
assert axis in ("x", "y")
siblings = getattr(ax, f"get_shared_{axis}_axes")().get_siblings(ax)
for sibling in siblings:
if sibling.bbox.bounds == ax.bbox.bounds and sibling is not ax:
return sibling
return None
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
"""
Returns a string representation of the scientific
notation of the given number formatted for use with
LaTeX or Mathtext, with specified number of significant
decimal digits and precision (number of decimal digits
to show). The exponent to be used can also be specified
explicitly.
"""
from math import floor, log10
if exponent is None:
exponent = int(floor(log10(abs(num))))
coeff = round(num / float(10**exponent), decimal_digits)
if precision is None:
precision = decimal_digits
return r"${0:.{2}f}\cdot10^{{{1:d}}}$".format(coeff, exponent, precision)
def utm_from_lon(lon):
"""
utm_from_lon - UTM zone for a longitude
Not right for some polar regions (Norway, Svalbard, Antartica)
:param float lon: longitude
:return: UTM zone number
:rtype: int
"""
from math import floor
return floor((lon + 180) / 6) + 1
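# e.g. utm_from_lon(35.0) returns 36, the UTM zone covering Israel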
def scale_bar(ax, proj, length, location=(0.5, 0.05), linewidth=3,
units='km', m_per_unit=1000, bounds=None):
"""
http://stackoverflow.com/a/35705477/1072212
ax is the axes to draw the scalebar on.
proj is the projection the axes are in
location is center of the scalebar in axis coordinates ie. 0.5 is the middle of the plot
length is the length of the scalebar in km.
linewidth is the thickness of the scalebar.
units is the name of the unit
m_per_unit is the number of meters in a unit
"""
import cartopy.crs as ccrs
from matplotlib import patheffects
# find lat/lon center to find best UTM zone
try:
x0, x1, y0, y1 = ax.get_extent(proj.as_geodetic())
except AttributeError:
if bounds is not None:
x0, x1, y0, y1 = bounds
# Projection in metres
utm = ccrs.UTM(utm_from_lon((x0+x1)/2))
# Get the extent of the plotted area in coordinates in metres
x0, x1, y0, y1 = ax.get_extent(utm)
# Turn the specified scalebar location into coordinates in metres
sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]
# Generate the x coordinate for the ends of the scalebar
bar_xs = [sbcx - length * m_per_unit/2, sbcx + length * m_per_unit/2]
# buffer for scalebar
buffer = [patheffects.withStroke(linewidth=5, foreground="w")]
# Plot the scalebar with buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, path_effects=buffer)
# buffer for text
buffer = [patheffects.withStroke(linewidth=3, foreground="w")]
# Plot the scalebar label
t0 = ax.text(sbcx, sbcy, str(length) + ' ' + units, transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
left = x0+(x1-x0)*0.05
# Plot the N arrow
t1 = ax.text(left, sbcy, u'\u25B2\nN', transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
# Plot the scalebar without buffer, in case covered by text buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, zorder=3)
return
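# Usage sketch for scale_bar (assumes cartopy is installed; the extent over
# Israel and the 50 km bar length are illustrative only):
def _scale_bar_example():
    import cartopy.crs as ccrs
    proj = ccrs.PlateCarree()
    ax = plt.axes(projection=proj)
    ax.set_extent([34.0, 36.0, 29.0, 34.0], crs=proj)
    # draw a 50 km scale bar near the lower-left corner of the map
    scale_bar(ax, proj, 50, location=(0.2, 0.05), units='km')
    return ax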
@ticker.FuncFormatter
def lon_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$W'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$E'.format(abs(x))
elif x == 0:
return r'0$\degree$'
@ticker.FuncFormatter
def lat_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$S'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$N'.format(abs(x))
elif x == 0:
return r'0$\degree$'
def align_yaxis_np(ax1, ax2):
"""Align zeros of the two axes, zooming them out by same ratio"""
import numpy as np
axes = np.array([ax1, ax2])
extrema = np.array([ax.get_ylim() for ax in axes])
tops = extrema[:,1] / (extrema[:,1] - extrema[:,0])
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [a[::-1] for a in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
extrema[0,1] = extrema[0,0] + tot_span * (extrema[0,1] - extrema[0,0])
extrema[1,0] = extrema[1,1] + tot_span * (extrema[1,0] - extrema[1,1])
[axes[i].set_ylim(*extrema[i]) for i in range(2)]
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# miny, maxy = ax2.get_ylim()
# ax2.set_ylim(miny+dy, maxy+dy)
def get_legend_labels_handles_title_seaborn_histplot(ax):
old_legend = ax.legend_
handles = old_legend.legendHandles
labels = [t.get_text() for t in old_legend.get_texts()]
title = old_legend.get_title().get_text()
return handles, labels, title
def alignYaxes(axes, align_values=None):
'''Align the ticks of multiple y axes
Args:
axes (list): list of axes objects whose yaxis ticks are to be aligned.
Keyword Args:
align_values (None or list/tuple): if not None, should be a list/tuple
of floats with same length as <axes>. Values in <align_values>
define where the corresponding axes should be aligned up. E.g.
[0, 100, -22.5] means the 0 in axes[0], 100 in axes[1] and -22.5
in axes[2] would be aligned up. If None, align (approximately)
the lowest ticks in all axes.
Returns:
new_ticks (list): a list of new ticks for each axis in <axes>.
A new sets of ticks are computed for each axis in <axes> but with equal
length.
'''
from matplotlib.pyplot import MaxNLocator
import numpy as np
nax = len(axes)
ticks = [aii.get_yticks() for aii in axes]
if align_values is None:
aligns = [ticks[ii][0] for ii in range(nax)]
else:
if len(align_values) != nax:
raise Exception(
"Length of <axes> doesn't equal that of <align_values>.")
aligns = align_values
bounds = [aii.get_ylim() for aii in axes]
# align at some points
ticks_align = [ticks[ii]-aligns[ii] for ii in range(nax)]
# scale the range to 1-100
ranges = [tii[-1]-tii[0] for tii in ticks]
lgs = [-np.log10(rii)+2. for rii in ranges]
igs = [np.floor(ii) for ii in lgs]
log_ticks = [ticks_align[ii]*(10.**igs[ii]) for ii in range(nax)]
# put all axes ticks into a single array, then compute new ticks for all
comb_ticks = np.concatenate(log_ticks)
comb_ticks.sort()
locator = MaxNLocator(nbins='auto', steps=[1, 2, 2.5, 3, 4, 5, 8, 10])
new_ticks = locator.tick_values(comb_ticks[0], comb_ticks[-1])
new_ticks = [new_ticks/10.**igs[ii] for ii in range(nax)]
new_ticks = [new_ticks[ii]+aligns[ii] for ii in range(nax)]
# find the lower bound
idx_l = 0
for i in range(len(new_ticks[0])):
if any([new_ticks[jj][i] > bounds[jj][0] for jj in range(nax)]):
idx_l = i-1
break
# find the upper bound
idx_r = 0
for i in range(len(new_ticks[0])):
if all([new_ticks[jj][i] > bounds[jj][1] for jj in range(nax)]):
idx_r = i
break
# trim tick lists by bounds
new_ticks = [tii[idx_l:idx_r+1] for tii in new_ticks]
# set ticks for each axis
for axii, tii in zip(axes, new_ticks):
axii.set_yticks(tii)
return new_ticks
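# Usage sketch for alignYaxes on a twin-axis plot (the data are illustrative):
def _alignYaxes_example():
    import numpy as np
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.plot(np.arange(10), np.arange(10), color='tab:blue')
    ax2.plot(np.arange(10), np.arange(10) * 37.0 - 5.0, color='tab:red')
    # line up the zero tick of both y-axes
    alignYaxes([ax1, ax2], align_values=[0, 0])
    return fig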
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1 - y2) / 2, v2)
adjust_yaxis(ax1, (y2 - y1) / 2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny * (maxy + dy) / (miny + dy)
else:
nmaxy = maxy
nminy = maxy * (miny + dy) / (maxy + dy)
ax.set_ylim(nminy + v, nmaxy + v)
def qualitative_cmap(n=2):
import matplotlib.colors as mcolors
if n == 2:
colorsList = [mcolors.BASE_COLORS['r'], mcolors.BASE_COLORS['g']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 4:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 5:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m'],
mcolors.BASE_COLORS['b']]
cmap = mcolors.ListedColormap(colorsList)
return cmap
def caption(text, color='blue', **kwargs):
from termcolor import colored
print(colored('Caption:', color, attrs=['bold'], **kwargs))
print(colored(text, color, attrs=['bold'], **kwargs))
return
def adjust_lightness(color, amount=0.5):
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
def produce_colors_for_pwv_station(scope='annual', zebra=False,
as_dict=False, as_cat_dict=False):
import pandas as pd
stns = group_sites_to_xarray(scope=scope)
cdict = {'coastal': 'tab:blue',
'highland': 'tab:green',
'eastern': 'tab:orange'}
if as_cat_dict:
return cdict
# for grp, color in cdict.copy().items():
# cdict[grp] = to_rgba(get_named_colors_mapping()[
# color], alpha=1)
ds = stns.to_dataset('group')
colors = []
for group in ds:
sts = ds[group].dropna('GNSS').values
for i, st in enumerate(sts):
color = cdict.get(group)
if zebra:
if i % 2 != 0:
# rgba = np.array(rgba)
# rgba[-1] = 0.5
color = adjust_lightness(color, 0.5)
colors.append(color)
# colors = [item for sublist in colors for item in sublist]
stns = stns.T.values.ravel()
stns = stns[~pd.isnull(stns)]
if as_dict:
colors = dict(zip(stns, colors))
return colors
def fix_time_axis_ticks(ax, limits=None, margin=15):
import pandas as pd
import matplotlib.dates as mdates
if limits is not None:
ax.set_xlim(*pd.to_datetime(limits))
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(mdates.MonthLocator())
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
return ax
def plot_qflux_climatotlogy_israel(path=era5_path, save=True, reduce='mean',
plot_type='uv'):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
ds = xr.load_dataset(path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
if plot_type == 'uv':
f1 = ds['q'] * ds['u']
f2 = ds['q'] * ds['v']
elif plot_type == 'md':
qu = ds['q'] * ds['u']
qv = ds['q'] * ds['v']
f1 = np.sqrt(qu**2 + qv**2)
f2 = np.rad2deg(np.arctan2(qv, qu))
if reduce == 'mean':
f1_clim = f1.groupby('time.month').mean().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').mean().mean(
'longitude').mean('latitude')
center = 0
cmap = 'bwr'
elif reduce == 'std':
f1_clim = f1.groupby('time.month').std().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').std().mean(
'longitude').mean('latitude')
center = None
cmap = 'viridis'
ds_clim = xr.concat([f1_clim, f2_clim], 'direction')
ds_clim['direction'] = ['zonal', 'meridional']
    if plot_type == 'md':
        fig, axes = plt.subplots(1, 2, figsize=(14, 7))
        f1_clim.sel(
            level=slice(
                300,
                1000)).T.plot.contourf(levels=41,
                                       yincrease=False,
                                       cmap=cmap,
                                       center=center, ax=axes[0])
        f2_clim.sel(
            level=slice(
                300,
                1000)).T.plot.contourf(levels=41,
                                       yincrease=False,
                                       cmap=cmap,
                                       center=center, ax=axes[1])
        fg = fig
    else:
        fg = ds_clim.sel(
            level=slice(
                300,
                1000)).T.plot.contourf(
            levels=41,
            yincrease=False,
            cmap=cmap,
            center=center,
            col='direction',
            figsize=(15, 6))
        fig = fg.fig
    fig.suptitle('Moisture flux climatology over Israel')
    # fig, axes = plt.subplots(1, 2, figsize=(15, 5))
    # qu_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[0], cmap='bwr', center=0)
    # qv_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[1], cmap='bwr', center=0)
    fig.subplots_adjust(top=0.923,
                        bottom=0.102,
                        left=0.058,
                        right=0.818,
                        hspace=0.2,
                        wspace=0.045)
    if save:
        filename = 'moisture_clim_from_ERA5_over_israel.png'
        # plt.savefig(savefig_path / filename, bbox_inches='tight')
        plt.savefig(savefig_path / filename, orientation='landscape')
    return fg
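# Minimal sketch of the magnitude/direction ('md') decomposition used above, on
# synthetic numbers rather than the ERA5 file: qu = q*u, qv = q*v,
# magnitude = sqrt(qu**2 + qv**2) and direction = atan2(qv, qu) in degrees.
def _example_moisture_flux_md(q=0.01, u=5.0, v=-3.0):
    """illustrative only: q in kg/kg, u/v wind components in m/s"""
    import numpy as np
    qu, qv = q * u, q * v
    magnitude = np.sqrt(qu**2 + qv**2)
    direction = np.rad2deg(np.arctan2(qv, qu))
    return magnitude, direction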
def plot_mean_std_count(da_ts, time_reduce='hour', reduce='mean',
                        count_factor=1):
    """plot the mean, std and count of an xarray DataArray time-series,
    grouped by time_reduce (e.g., 'hour' or 'month')"""
    import xarray as xr
    import seaborn as sns
    import matplotlib.pyplot as plt
cmap = sns.color_palette("colorblind", 2)
time_dim = list(set(da_ts.dims))[0]
grp = '{}.{}'.format(time_dim, time_reduce)
if reduce == 'mean':
mean = da_ts.groupby(grp).mean()
elif reduce == 'median':
mean = da_ts.groupby(grp).median()
std = da_ts.groupby(grp).std()
mean_plus_std = mean + std
mean_minus_std = mean - std
count = da_ts.groupby(grp).count()
if isinstance(da_ts, xr.Dataset):
dvars = [x for x in da_ts.data_vars.keys()]
assert len(dvars) == 2
secondary_y = dvars[1]
else:
secondary_y = None
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 15))
mean_df = mean.to_dataframe()
if secondary_y is not None:
axes[0] = mean_df[dvars[0]].plot(
ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
ax2mean = mean_df[secondary_y].plot(
ax=axes[0],
linewidth=2.0,
marker='s',
color=cmap[1],
secondary_y=True)
h1, l1 = axes[0].get_legend_handles_labels()
h2, l2 = axes[0].right_ax.get_legend_handles_labels()
handles = h1 + h2
labels = l1 + l2
axes[0].legend(handles, labels)
axes[0].fill_between(mean_df.index.values,
mean_minus_std[dvars[0]].values,
mean_plus_std[dvars[0]].values,
color=cmap[0],
alpha=0.5)
ax2mean.fill_between(
mean_df.index.values,
mean_minus_std[secondary_y].values,
mean_plus_std[secondary_y].values,
color=cmap[1],
alpha=0.5)
ax2mean.tick_params(axis='y', colors=cmap[1])
else:
mean_df.plot(ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
axes[0].fill_between(
mean_df.index.values,
mean_minus_std.values,
mean_plus_std.values,
color=cmap[0],
alpha=0.5)
axes[0].grid()
count_df = count.to_dataframe() / count_factor
count_df.plot.bar(ax=axes[1], rot=0)
axes[0].xaxis.set_tick_params(labelbottom=True)
axes[0].tick_params(axis='y', colors=cmap[0])
fig.tight_layout()
if secondary_y is not None:
return axes, ax2mean
else:
return axes
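# Hedged usage sketch for plot_mean_std_count on a synthetic hourly DataArray
# (the real inputs are GNSS PWV series; all numbers below are made up):
def _example_plot_mean_std_count():
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2019-01-01', periods=24 * 30, freq='H')
    da = xr.DataArray(np.random.randn(time.size), dims=['time'],
                      coords={'time': time}, name='pwv_anoms')
    return plot_mean_std_count(da, time_reduce='hour', reduce='mean')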
def plot_seasonal_histogram(da, dim='sound_time', xlim=None, xlabel=None,
suptitle=''):
fig_hist, axs = plt.subplots(2, 2, sharex=False, sharey=True,
figsize=(10, 8))
seasons = ['DJF', 'MAM', 'JJA', 'SON']
cmap = sns.color_palette("colorblind", 4)
for i, ax in enumerate(axs.flatten()):
da_season = da.sel(
{dim: da['{}.season'.format(dim)] == seasons[i]}).dropna(dim)
ax = sns.distplot(da_season, ax=ax, norm_hist=False,
color=cmap[i], hist_kws={'edgecolor': 'k'},
axlabel=xlabel,
label=seasons[i])
ax.set_xlim(xlim)
ax.legend()
# axes.set_xlabel('MLH [m]')
ax.set_ylabel('Frequency')
fig_hist.suptitle(suptitle)
fig_hist.tight_layout()
return axs
def plot_two_histograms_comparison(x, y, bins=None, labels=['x', 'y'],
ax=None, colors=['b', 'r']):
import numpy as np
import matplotlib.pyplot as plt
x_w = np.empty(x.shape)
x_w.fill(1/x.shape[0])
y_w = np.empty(y.shape)
y_w.fill(1/y.shape[0])
if ax is None:
fig, ax = plt.subplots()
ax.hist([x, y], bins=bins, weights=[x_w, y_w], color=colors,
label=labels)
ax.legend()
return ax
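# Hedged sketch of the weighting trick above: giving each sample a weight of 1/N
# makes every histogram sum to 1, so two samples of different sizes can be
# compared directly. The synthetic samples below are made up.
def _example_weighted_histograms():
    import numpy as np
    rng = np.random.default_rng(0)
    x = rng.normal(0.0, 1.0, 1000)
    y = rng.normal(0.5, 1.0, 250)
    return plot_two_histograms_comparison(x, y, bins=np.linspace(-4, 4, 33),
                                          labels=['x (n=1000)', 'y (n=250)'])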
def plot_diurnal_wind_hodograph(path=ims_path, station='TEL-AVIV-COAST',
season=None, cmax=None, ax=None):
import xarray as xr
from metpy.plots import Hodograph
# import matplotlib
import numpy as np
colorbar = False
# from_list = matplotlib.colors.LinearSegmentedColormap.from_list
cmap = plt.cm.get_cmap('hsv', 24)
# cmap = from_list(None, plt.cm.jet(range(0,24)), 24)
U = xr.open_dataset(path / 'IMS_U_israeli_10mins.nc')
V = xr.open_dataset(path / 'IMS_V_israeli_10mins.nc')
u_sta = U[station]
v_sta = V[station]
u_sta.load()
v_sta.load()
if season is not None:
print('{} season selected'.format(season))
u_sta = u_sta.sel(time=u_sta['time.season'] == season)
v_sta = v_sta.sel(time=v_sta['time.season'] == season)
u = u_sta.groupby('time.hour').mean()
v = v_sta.groupby('time.hour').mean()
if ax is None:
colorbar = True
fig, ax = plt.subplots()
    if cmax is None:
        max_uv = max(max(u.values), max(v.values)) + 1
    else:
        max_uv = cmax
h = Hodograph(component_range=max_uv, ax=ax)
h.add_grid(increment=0.5)
# hours = np.arange(0, 25)
lc = h.plot_colormapped(u, v, u.hour, cmap=cmap,
linestyle='-', linewidth=2)
#ticks = np.arange(np.min(hours), np.max(hours))
# cb = fig.colorbar(lc, ticks=range(0,24), label='Time of Day [UTC]')
if colorbar:
cb = ax.figure.colorbar(lc, ticks=range(
0, 24), label='Time of Day [UTC]')
# cb.ax.tick_params(length=0)
if season is None:
ax.figure.suptitle('{} diurnal wind Hodograph'.format(station))
else:
ax.figure.suptitle(
'{} diurnal wind Hodograph {}'.format(station, season))
ax.set_xlabel('North')
ax.set_ylabel('East')
ax.set_title('South')
ax2 = ax.twinx()
ax2.tick_params(axis='y', right=False, labelright=False)
ax2.set_ylabel('West')
# axcb = fig.colorbar(lc)
return ax
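# Minimal, hedged sketch of the hodograph core above: hourly-mean u/v components
# traced with MetPy's Hodograph and colored by hour. The sinusoidal winds below
# are synthetic stand-ins for the IMS station diurnal means.
def _example_diurnal_hodograph():
    import numpy as np
    import matplotlib.pyplot as plt
    from metpy.plots import Hodograph
    hours = np.arange(24)
    u = 2.0 * np.sin(2 * np.pi * hours / 24)
    v = 1.5 * np.cos(2 * np.pi * hours / 24)
    fig, ax = plt.subplots()
    h = Hodograph(component_range=3, ax=ax)
    h.add_grid(increment=0.5)
    h.plot_colormapped(u, v, hours, cmap=plt.cm.get_cmap('hsv', 24))
    return ax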
def plot_MLR_GNSS_PW_harmonics_facetgrid(path=work_yuval, season='JJA',
n_max=2, ylim=None, scope='diurnal',
save=True, era5=False, leg_size=15):
"""
Parameters
----------
path : TYPE, optional
DESCRIPTION. The default is work_yuval.
season : TYPE, optional
DESCRIPTION. The default is 'JJA'.
n_max : TYPE, optional
DESCRIPTION. The default is 2.
ylim : TYPE, optional
the ylimits of each panel use [-6,8] for annual. The default is None.
scope : TYPE, optional
DESCRIPTION. The default is 'diurnal'.
save : TYPE, optional
DESCRIPTION. The default is True.
era5 : TYPE, optional
DESCRIPTION. The default is False.
leg_size : TYPE, optional
DESCRIPTION. The default is 15.
Returns
-------
None.
"""
import xarray as xr
from aux_gps import run_MLR_harmonics
from matplotlib.ticker import AutoMinorLocator
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
sns.set_style('whitegrid')
sns.set_style('ticks')
geo = produce_geo_gnss_solved_stations(add_distance_to_coast=True, plot=False)
if scope == 'diurnal':
cunits = 'cpd'
ticks = np.arange(0, 23, 3)
xlabel = 'Hour of day [UTC]'
elif scope == 'annual':
cunits = 'cpy'
ticks = np.arange(1, 13, 1)
xlabel = 'month'
print('producing {} harmonics plot.'.format(scope))
if era5:
harmonics = xr.load_dataset(path / 'GNSS_PW_era5_harmonics_{}.nc'.format(scope))
else:
harmonics = xr.load_dataset(path / 'GNSS_PW_harmonics_{}.nc'.format(scope))
# sites = sorted(list(set([x.split('_')[0] for x in harmonics])))
# da = xr.DataArray([x for x in range(len(sites))], dims='GNSS')
# da['GNSS'] = sites
sites = group_sites_to_xarray(upper=False, scope=scope)
sites_flat = [x for x in sites.values.flatten()]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
try:
harm_site = harmonics[[x for x in harmonics if site in x]]
if site in ['nrif']:
leg_loc = 'upper center'
elif site in ['yrcm', 'ramo']:
leg_loc = 'lower center'
# elif site in ['katz']:
# leg_loc = 'upper right'
else:
leg_loc = None
if scope == 'annual':
leg_loc = 'upper left'
ax, handles, labels = run_MLR_harmonics(harm_site, season=season,
cunits=cunits,
n_max=n_max, plot=True, ax=ax,
legend_loc=leg_loc, ncol=1,
legsize=leg_size, lw=2.5,
legend_S_only=True)
ax.set_xlabel(xlabel, fontsize=16)
if ylim is not None:
ax.set_ylim(*ylim)
ax.tick_params(axis='x', which='major', labelsize=18)
# if scope == 'diurnal':
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=18)
ax.yaxis.tick_left()
ax.xaxis.set_ticks(ticks)
ax.grid()
ax.set_title('')
ax.set_ylabel('')
ax.grid(axis='y', which='minor', linestyle='--')
# get this for upper legend:
# handles, labels = ax.get_legend_handles_labels()
if scope == 'annual':
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
label_coord = [0.52, 0.87]
fs = 18
elif scope == 'diurnal':
site_label = site.upper()
label_coord = [0.1, 0.85]
fs = 20
ax.text(*label_coord, site_label,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes, fontsize=fs)
if j == 0:
ax.set_ylabel('PWV anomalies [mm]', fontsize=16)
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
except TypeError:
print('{}, {} axis off'.format(i, j))
ax.set_axis_off()
# for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
# harm_site = harmonics[[x for x in harmonics if sites[i] in x]]
# if site in ['elat', 'nrif']:
# loc = 'upper center'
# text = 0.1
# elif site in ['elro', 'yrcm', 'ramo', 'slom', 'jslm']:
# loc = 'upper right'
# text = 0.1
# else:
# loc = None
# text = 0.1
# ax = run_MLR_diurnal_harmonics(harm_site, season=season, n_max=n_max, plot=True, ax=ax, legend_loc=loc)
# ax.set_title('')
# ax.set_ylabel('PW anomalies [mm]')
# if ylim is not None:
# ax.set_ylim(ylim[0], ylim[1])
# ax.text(text, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# for i, ax in enumerate(fg.axes.flatten()):
# if i > (da.GNSS.telasize-1):
# ax.set_axis_off()
# pass
# add upper legend for all factes:
S_labels = labels[:-2]
S_labels = [x.split(' ')[0] for x in S_labels]
last_label = 'Mean PWV anomalies'
sum_label = labels[-2].split("'")[1]
S_labels.append(sum_label)
S_labels.append(last_label)
fg.fig.legend(handles=handles, labels=S_labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.032,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
if era5:
filename = 'pw_era5_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
else:
filename = 'pw_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_gustiness(path=work_yuval, ims_path=ims_path, site='tela',
ims_site='HAIFA-TECHNION', season='JJA', month=None, pts=7,
ax=None):
import xarray as xr
import numpy as np
g = xr.open_dataset(
ims_path / 'IMS_G{}_israeli_10mins_daily_anoms.nc'.format(pts))[ims_site]
g.load()
    if season is not None and month is not None:
        raise ValueError('pls pick either season or month...')
    if season is not None:
        g = g.sel(time=g['time.season'] == season)
        label = 'Gustiness {} IMS station in {} season'.format(
            site, season)
    elif month is not None:
        g = g.sel(time=g['time.month'] == month)
        label = 'Gustiness {} IMS station in {} month'.format(
            site, month)
    else:
        label = 'Gustiness {} IMS station'.format(site)
# date = groupby_date_xr(g)
# # g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
# g_anoms = g.groupby(date) - g.groupby(date).mean('time')
# g_anoms = g_anoms.reset_coords(drop=True)
G = g.groupby('time.hour').mean('time') * 100.0
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
Gline = G.plot(ax=ax, color='b', marker='o', label='Gustiness')
ax.set_title(label)
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
# ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_hourly_anoms_thresh_50_homogenized.nc')[site]
    pw = pw.load().dropna('time')
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
elif month is not None:
pw = pw.sel(time=pw['time.month'] == month)
# date = groupby_date_xr(pw)
# pw = pw.groupby(date) - pw.groupby(date).mean('time')
# pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
PWline = pw.plot.line(ax=axpw, color='tab:green',
marker='s', label='PW ({})'.format(season))
axpw.axhline(0, color='k', linestyle='--')
lns = Gline + PWline
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
return lns
def plot_gustiness_facetgrid(path=work_yuval, ims_path=ims_path,
season='JJA', month=None, save=True):
import xarray as xr
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124'}
da = xr.DataArray([x for x in gnss_ims_dict.values()], dims=['GNSS'])
da['GNSS'] = [x for x in gnss_ims_dict.keys()]
to_remove = ['kabr', 'nzrt', 'katz', 'elro', 'klhv', 'yrcm', 'slom']
sites = [x for x in da['GNSS'].values if x not in to_remove]
da = da.sel(GNSS=sites)
gnss_order = ['bshm', 'mrav', 'drag', 'csar', 'yosh', 'dsea', 'tela', 'jslm',
'nrif', 'alon', 'ramo', 'elat']
df = da.to_dataframe('gnss')
da = df.reindex(gnss_order).to_xarray()['gnss']
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
lns = plot_gustiness(path=path, ims_path=ims_path,
ims_site=gnss_ims_dict[site],
site=site, season=season, month=month, ax=ax)
labs = [l.get_label() for l in lns]
if site in ['tela', 'alon', 'dsea', 'csar', 'elat', 'nrif']:
ax.legend(lns, labs, loc='upper center', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
elif site in ['drag']:
ax.legend(lns, labs, loc='upper right', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
else:
ax.legend(lns, labs, loc='best', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
ax.set_title('')
ax.set_ylabel(r'G anomalies $\times$$10^{2}$')
# ax.text(.8, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
for i, ax in enumerate(fg.axes.flatten()):
if i > (da.GNSS.size-1):
ax.set_axis_off()
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.974,
bottom=0.053,
left=0.041,
right=0.955,
hspace=0.15,
wspace=0.3)
filename = 'gustiness_israeli_gnss_pw_diurnal_{}.png'.format(season)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_fft_diurnal(path=work_yuval, save=True):
import xarray as xr
import numpy as np
import matplotlib.ticker as tck
sns.set_style("whitegrid",
{'axes.grid': True,
'xtick.bottom': True,
'font.family': 'serif',
'ytick.left': True})
sns.set_context('paper')
power = xr.load_dataset(path / 'GNSS_PW_power_spectrum_diurnal.nc')
power = power.to_array('site')
sites = [x for x in power.site.values]
fg = power.plot.line(col='site', col_wrap=4,
sharex=False, figsize=(20, 18))
fg.set_xlabels('Frequency [cpd]')
fg.set_ylabels('PW PSD [dB]')
ticklabels = np.arange(0, 7)
for ax, site in zip(fg.axes.flatten(), sites):
sns.despine()
ax.set_title('')
ax.set_xticklabels(ticklabels)
# ax.tick_params(axis='y', which='minor')
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.set_xlim(0, 6.5)
ax.set_ylim(70, 125)
ax.grid(True)
ax.grid(which='minor', axis='y')
ax.text(.8, .85, site.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
fg.fig.tight_layout()
filename = 'power_pw_diurnal.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_rinex_availability_with_map(path=work_yuval, gis_path=gis_path,
scope='diurnal', ims=True,
dem_path=dem_path, fontsize=18, save=True):
# TODO: add box around merged stations and removed stations
# TODO: add color map labels to stations removed and merged
from aux_gps import gantt_chart
import xarray as xr
import pandas as pd
import geopandas as gpd
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
from matplotlib.colors import ListedColormap
from aux_gps import path_glob
sns.set_style('whitegrid')
sns.set_style('ticks')
print('{} scope selected.'.format(scope))
fig = plt.figure(figsize=(20, 15))
# grid = plt.GridSpec(1, 2, width_ratios=[
# 5, 2], wspace=0.1)
grid = plt.GridSpec(1, 2, width_ratios=[
5, 3], wspace=0.05)
ax_gantt = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_map = fig.add_subplot(grid[0, 1]) # plt.subplot(122)
# fig, ax = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(20, 6))
# RINEX gantt chart:
if scope == 'diurnal':
file = path_glob(path, 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')[-1]
elif scope == 'annual':
file = path / 'GNSS_PW_monthly_thresh_50.nc'
ds = xr.open_dataset(file)
just_pw = [x for x in ds if 'error' not in x]
ds = ds[just_pw]
da = ds.to_array('station').sel(time=slice(None,'2019'))
da['station'] = [x.upper() for x in da.station.values]
ds = da.to_dataset('station')
# reorder for annual, coastal, highland and eastern:
stns = group_sites_to_xarray(scope='annual', upper=True).T.values.ravel()
stns = stns[~pd.isnull(stns)]
ds = ds[stns]
# colors:
colors = produce_colors_for_pwv_station(scope=scope, zebra=False)
title = 'Daily RINEX files availability for the Israeli GNSS stations'
ax_gantt = gantt_chart(
ds,
ax=ax_gantt,
fw='bold', grid=True,
title='', colors=colors,
pe_dict=None, fontsize=fontsize, linewidth=24, antialiased=False)
years_fmt = mdates.DateFormatter('%Y')
# ax_gantt.xaxis.set_major_locator(mdates.YearLocator())
ax_gantt.xaxis.set_major_locator(mdates.YearLocator(4))
ax_gantt.xaxis.set_minor_locator(mdates.YearLocator(1))
ax_gantt.xaxis.set_major_formatter(years_fmt)
# ax_gantt.xaxis.set_minor_formatter(years_fmt)
ax_gantt.tick_params(axis='x', labelrotation=0)
# Israel gps ims map:
ax_map = plot_israel_map(
gis_path=gis_path, ax=ax_map, ticklabelsize=fontsize)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
# scale_bar(ax_map, 50)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level',
size=fontsize, weight='normal')
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
# removed = ['hrmn', 'nizn', 'spir']
# removed = ['hrmn']
if scope == 'diurnal':
removed = ['hrmn', 'gilb', 'lhav']
elif scope == 'annual':
removed = ['hrmn', 'gilb', 'lhav']
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
# gps.loc[removed, :].plot(ax=ax_map, color='black', edgecolor='black', marker='s',
# alpha=1.0, markersize=25, facecolor='white')
# gps.loc[merged, :].plot(ax=ax_map, color='black', edgecolor='r', marker='s',
# alpha=0.7, markersize=25)
gps_stations = gps_list # [x for x in gps.index]
# to_plot_offset = ['mrav', 'klhv', 'nzrt', 'katz', 'elro']
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
# geo_annotate(ax_map, gps_normal_anno.lon, gps_normal_anno.lat,
# gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# geo_annotate(ax_map, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax_map, color='black', edgecolor='black',
marker='x', linewidth=2, zorder=2)
geo_annotate(ax_map, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=fontsize - 2, colorupdown=False)
# plt.legend(['GNSS \nreceiver sites',
# 'removed \nGNSS sites',
# 'merged \nGNSS sites',
# 'radiosonde\nstation'],
# loc='upper left', framealpha=0.7, fancybox=True,
# handletextpad=0.2, handlelength=1.5)
if ims:
print('getting IMS temperature stations metadata...')
ims = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims.plot(ax=ax_map, marker='o', edgecolor='tab:orange', alpha=1.0,
markersize=35, facecolor="tab:orange", zorder=1)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
print('getting solved GNSS israeli stations metadata...')
plt.legend(['GNSS \nstations',
'radiosonde\nstation', 'IMS stations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
else:
plt.legend(['GNSS \nstations',
'radiosonde\nstation'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
fig.subplots_adjust(top=0.95,
bottom=0.11,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2)
# plt.legend(['IMS stations', 'GNSS stations'], loc='upper left')
filename = 'rinex_israeli_gnss_map_{}.png'.format(scope)
# caption('Daily RINEX files availability for the Israeli GNSS station network at the SOPAC/GARNER website')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_means_box_plots(path=work_yuval, thresh=50, kind='box',
x='month', col_wrap=5, ylimits=None, twin=None,
twin_attrs=None,
xlimits=None, anoms=True, bins=None,
season=None, attrs_plot=True, save=True, ds_input=None):
import xarray as xr
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
attrs = [x.attrs for x in pw.data_vars.values()]
if x == 'month':
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# pw = pw.resample(time='MS').mean('time')
elif x == 'hour':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
if twin is not None:
twin = twin.groupby('time.month') - \
twin.groupby('time.month').mean('time')
twin = twin.reset_coords(drop=True)
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
elif x == 'day':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_daily_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
pw = pw.groupby('time.dayofyear') - \
                pw.groupby('time.dayofyear').mean('time')
if season is not None:
if season != 'all':
print('{} season is selected'.format(season))
pw = pw.sel(time=pw['time.season'] == season)
all_seas = False
if twin is not None:
twin = twin.sel(time=twin['time.season'] == season)
else:
print('all seasons selected')
all_seas = True
else:
all_seas = False
for i, da in enumerate(pw.data_vars):
pw[da].attrs = attrs[i]
if not attrs_plot:
attrs = None
if ds_input is not None:
        # be careful!:
pw = ds_input
fg = plot_multi_box_xr(pw, kind=kind, x=x, col_wrap=col_wrap,
ylimits=ylimits, xlimits=xlimits, attrs=attrs,
bins=bins, all_seasons=all_seas, twin=twin,
twin_attrs=twin_attrs)
attrs = [x.attrs for x in pw.data_vars.values()]
for i, ax in enumerate(fg.axes.flatten()):
try:
mean_years = float(attrs[i]['mean_years'])
# print(i)
# print(mean_years)
except IndexError:
ax.set_axis_off()
pass
if kind != 'hist':
[fg.axes[x, 0].set_ylabel('PW [mm]')
for x in range(len(fg.axes[:, 0]))]
# [fg.axes[-1, x].set_xlabel('month') for x in range(len(fg.axes[-1, :]))]
fg.fig.subplots_adjust(top=0.98,
bottom=0.05,
left=0.025,
right=0.985,
hspace=0.27,
wspace=0.215)
if season is not None:
filename = 'pw_{}ly_means_{}_seas_{}.png'.format(x, kind, season)
else:
filename = 'pw_{}ly_means_{}.png'.format(x, kind)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_interannual_MLR_results(path=climate_path, fontsize=16, save=True):
import matplotlib.pyplot as plt
from climate_works import run_best_MLR
# rds = xr.load_dataset(path / 'best_MLR_interannual_gnss_pwv.nc')
model_lci, rdf_lci = run_best_MLR(plot=False, heatmap=False, keep='lci',
add_trend=True)
rds_lci = model_lci.results_
model_eofi, rdf_eofi = run_best_MLR(plot=False, heatmap=False, keep='eofi',
add_trend=False)
rds_eofi = model_eofi.results_
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 7))
origln = rds_lci['original'].plot.line('k-.', ax=axes[0], linewidth=1.5)
predln_lci = rds_lci['predict'].plot.line('b-', ax=axes[0], linewidth=1.5)
predln_eofi = rds_eofi['predict'].plot.line(
'g-', ax=axes[0], linewidth=1.5)
r2_lci = rds_lci['r2_adj'].item()
r2_eofi = rds_eofi['r2_adj'].item()
axes[0].legend(origln+predln_lci+predln_eofi, ['mean PWV (12m-mean)', 'MLR with LCI (Adj R$^2$:{:.2f})'.format(
r2_lci), 'MLR with EOFs (Adj R$^2$:{:.2f})'.format(r2_eofi)], fontsize=fontsize-2)
axes[0].grid()
axes[0].set_xlabel('')
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].grid(which='minor', color='k', linestyle='--')
residln_lci = rds_lci['resid'].plot.line('b-', ax=axes[1])
residln_eofi = rds_eofi['resid'].plot.line('g-', ax=axes[1])
axes[1].legend(residln_lci+residln_eofi, ['MLR with LCI',
'MLR with EOFs'], fontsize=fontsize-2)
axes[1].grid()
axes[1].set_ylabel('Residuals [mm]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
axes[1].xaxis.set_major_locator(mdates.YearLocator(2))
axes[1].xaxis.set_minor_locator(mdates.YearLocator(1))
axes[1].xaxis.set_major_formatter(years_fmt)
axes[1].grid(which='minor', color='k', linestyle='--')
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
axes[1].figure.autofmt_xdate()
fig.tight_layout()
fig.subplots_adjust()
if save:
filename = 'pw_interannual_MLR_comparison.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_annual_pw(path=work_yuval, fontsize=20, labelsize=18, compare='uerra',
ylim=[7.5, 40], save=True, kind='violin', bins=None, ds=None,
add_temperature=False):
"""kind can be violin or hist, for violin choose ylim=7.5,40 and for hist
choose ylim=0,0.3"""
import xarray as xr
import pandas as pd
import numpy as np
from synoptic_procedures import slice_xr_with_synoptic_class
gnss_filename = 'GNSS_PW_monthly_thresh_50.nc'
# gnss_filename = 'first_climatol_try.nc'
pw = xr.load_dataset(path / gnss_filename)
df_annual = pw.to_dataframe()
hue = None
if compare is not None:
df_annual = prepare_reanalysis_monthly_pwv_to_dataframe(
path, re=compare, ds=ds)
hue = 'source'
if not add_temperature:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind=kind,
fg=None,
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, hue=hue,
save=False, bins=bins)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
filename = 'pw_annual_means_{}.png'.format(kind)
else:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind='mean_month',
fg=None, ticklabelcolor='tab:blue',
ylim=[10, 31], color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, hue=None,
save=False, bins=None)
# tmm = xr.load_dataset(path / 'GNSS_TD_monthly_1996_2020.nc')
tmm = xr.load_dataset(path / 'IMS_T/GNSS_TD_daily.nc')
tmm = tmm.groupby('time.month').mean()
dftm = tmm.to_dataframe()
# dftm.columns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
sites = group_sites_to_xarray(scope='annual')
sites_flat = sites.values.ravel()
# sites = sites[~pd.isnull(sites)]
for i, ax in enumerate(fg.axes.flat):
if pd.isnull(sites_flat[i]):
continue
twinax = ax.twinx()
twinax.plot(dftm.index.values, dftm[sites_flat[i]].values, color='tab:red',
markersize=10, marker='s', lw=1, markerfacecolor="None",
label='Temperature')
# dftm[sites[i]].plot(ax=twinax, color='r', markersize=10,
# marker='s', lw=1, markerfacecolor="None")
twinax.set_ylim(5, 37)
twinax.set_yticks(np.arange(5, 40, 10))
twinax.tick_params(axis='y', which='major', labelcolor='tab:red',
labelsize=labelsize)
if sites_flat[i] in sites.sel(group='eastern'):
twinax.set_ylabel(r'Temperature [$\degree$ C]',
fontsize=labelsize)
# fg.fig.canvas.draw()
# twinax.xaxis.set_ticks(np.arange(1, 13))
# twinax.tick_params(axis='x', which='major', labelsize=labelsize-2)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = twinax.get_legend_handles_labels()
labels = ['PWV', 'Surface Temperature']
fg.fig.legend(handles=lines+lines2, labels=labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.97,
bottom=0.029,
left=0.049,
right=0.96,
hspace=0.15,
wspace=0.17)
filename = 'pw_annual_means_temperature.png'
if save:
if compare is not None:
filename = 'pw_annual_means_{}_with_{}.png'.format(kind, compare)
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_multi_box_xr(pw, kind='violin', x='month', sharex=False, sharey=False,
col_wrap=5, ylimits=None, xlimits=None, attrs=None,
bins=None, all_seasons=False, twin=None, twin_attrs=None):
import xarray as xr
pw = pw.to_array('station')
if twin is not None:
twin = twin.to_array('station')
fg = xr.plot.FacetGrid(pw, col='station', col_wrap=col_wrap, sharex=sharex,
sharey=sharey)
for i, (sta, ax) in enumerate(zip(pw['station'].values, fg.axes.flatten())):
pw_sta = pw.sel(station=sta).reset_coords(drop=True)
if all_seasons:
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'DJF')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='o')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'MAM')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='^')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'JJA')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='s')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'SON')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='x')
df = pw_sta.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='d')
if sta == 'nrif' or sta == 'elat':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper center', framealpha=0.5, fancybox=True)
elif sta == 'yrcm' or sta == 'ramo':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper right', framealpha=0.5, fancybox=True)
else:
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='best', framealpha=0.5, fancybox=True)
else:
# if x == 'hour':
# # remove seasonal signal:
# pw_sta = pw_sta.groupby('time.dayofyear') - pw_sta.groupby('time.dayofyear').mean('time')
# elif x == 'month':
# # remove daily signal:
# pw_sta = pw_sta.groupby('time.hour') - pw_sta.groupby('time.hour').mean('time')
df = pw_sta.to_dataframe(sta)
if twin is not None:
twin_sta = twin.sel(station=sta).reset_coords(drop=True)
twin_df = twin_sta.to_dataframe(sta)
else:
twin_df = None
if attrs is not None:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i],
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
else:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None,
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
return fg
def plot_box_df(df, x='month', title='TELA', marker='o',
ylabel=r'IWV [kg$\cdot$m$^{-2}$]', ax=None, kind='violin',
ylimits=(5, 40), xlimits=None, attrs=None, bins=None, twin_df=None,
twin_attrs=None):
# x=hour is experimental
import seaborn as sns
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
# df = da_ts.to_dataframe()
if x == 'month':
df[x] = df.index.month
pal = sns.color_palette("Paired", 12)
elif x == 'hour':
df[x] = df.index.hour
if twin_df is not None:
twin_df[x] = twin_df.index.hour
# df[x] = df.index
pal = sns.color_palette("Paired", 12)
y = df.columns[0]
if ax is None:
fig, ax = plt.subplots()
if kind is None:
df = df.groupby(x).mean()
df.plot(ax=ax, legend=False, marker=marker)
if twin_df is not None:
twin_df = twin_df.groupby(x).mean()
twinx = ax.twinx()
twin_df.plot.line(ax=twinx, color='r', marker='s')
ax.axhline(0, color='k', linestyle='--')
if twin_attrs is not None:
twinx.set_ylabel(twin_attrs['ylabel'])
align_yaxis(ax, 0, twinx, 0)
ax.set_xlabel('Time of day [UTC]')
elif kind == 'violin':
sns.violinplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
gridsize=250, inner='quartile', scale='area')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'box':
kwargs = dict(markerfacecolor='r', marker='o')
sns.boxplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
whis=1.0, flierprops=kwargs, showfliers=False)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'hist':
if bins is None:
bins = 15
a = df[y].dropna()
sns.distplot(ax=ax, a=a, norm_hist=True, bins=bins, axlabel='PW [mm]')
xmean = df[y].mean()
xmedian = df[y].median()
std = df[y].std()
sk = skew(df[y].dropna().values)
kurt = kurtosis(df[y].dropna().values)
# xmode = df[y].mode().median()
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean, color='r', linestyle='--')
ax.vlines(x=xmedian, ymin=0, ymax=ymed, color='g', linestyle='-')
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
# ax.legend(['Mean:{:.1f}'.format(xmean),'Median:{:.1f}'.format(xmedian),'Mode:{:.1f}'.format(xmode)])
ax.legend(['Mean: {:.1f}'.format(xmean),
'Median: {:.1f}'.format(xmedian)])
ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(
std, sk, kurt), transform=ax.transAxes)
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.yaxis.grid(True, which='minor', linestyle='--', linewidth=1, alpha=0.7)
ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
title = ax.get_title().split('=')[-1].strip(' ')
if attrs is not None:
mean_years = float(attrs['mean_years'])
ax.set_title('')
ax.text(.2, .85, y.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if kind is not None:
if kind != 'hist':
ax.text(.22, .72, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
ax.yaxis.tick_left()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
if ylimits is not None:
ax.set_ylim(*ylimits)
if twin_attrs is not None:
twinx.set_ylim(*twin_attrs['ylimits'])
align_yaxis(ax, 0, twinx, 0)
if xlimits is not None:
ax.set_xlim(*xlimits)
return ax
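# Minimal sketch of how the 'hist' branch above places its mean/median markers:
# the y-value of the plotted distribution curve is read back with np.interp at
# x = mean (or median), and a vline is drawn up to that height. The curve below
# is a synthetic stand-in for the KDE line.
def _example_vline_height(xmean=5.3):
    import numpy as np
    data_x = np.linspace(0, 10, 101)
    data_y = np.exp(-0.5 * (data_x - 5.0) ** 2)
    ymean = np.interp(xmean, data_x, data_y)
    return ymean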
def plot_means_pw(load_path=work_yuval, ims_path=ims_path, thresh=50,
col_wrap=5, means='hour', save=True):
import xarray as xr
import numpy as np
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
if means == 'hour':
# remove long term monthly means:
pw_clim = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw_clim = pw_clim.groupby('time.{}'.format(means)).mean('time')
else:
pw_clim = pw.groupby('time.{}'.format(means)).mean('time')
# T = xr.load_dataset(
# ims_path /
# 'GNSS_5mins_TD_ALL_1996_2020.nc')
# T_clim = T.groupby('time.month').mean('time')
attrs = [x.attrs for x in pw.data_vars.values()]
fg = pw_clim.to_array('station').plot(col='station', col_wrap=col_wrap,
color='b', marker='o', alpha=0.7,
sharex=False, sharey=True)
col_arr = np.arange(0, len(pw_clim))
right_side = col_arr[col_wrap-1::col_wrap]
for i, ax in enumerate(fg.axes.flatten()):
title = ax.get_title().split('=')[-1].strip(' ')
try:
mean_years = float(attrs[i]['mean_years'])
ax.set_title('')
ax.text(.2, .85, title.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
ax.text(.2, .73, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
# ax_t = ax.twinx()
# T_clim['{}'.format(title)].plot(
# color='r', linestyle='dashed', marker='s', alpha=0.7,
# ax=ax_t)
# ax_t.set_ylim(0, 30)
fg.fig.canvas.draw()
# labels = [item.get_text() for item in ax_t.get_yticklabels()]
# ax_t.yaxis.set_ticklabels([])
# ax_t.tick_params(axis='y', color='r')
# ax_t.set_ylabel('')
# if i in right_side:
# ax_t.set_ylabel(r'Surface temperature [$\degree$C]', fontsize=10)
# ax_t.yaxis.set_ticklabels(labels)
# ax_t.tick_params(axis='y', labelcolor='r', color='r')
# show months ticks and grid lines for pw:
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid()
# ax.legend([ax.lines[0], ax_t.lines[0]], ['PW', 'T'],
# loc='upper right', fontsize=10, prop={'size': 8})
# ax.legend([ax.lines[0]], ['PW'],
# loc='upper right', fontsize=10, prop={'size': 8})
except IndexError:
pass
# change bottom xticks to 1-12 and show them:
# fg.axes[-1, 0].xaxis.set_ticks(np.arange(1, 13))
[fg.axes[x, 0].set_ylabel('PW [mm]') for x in range(len(fg.axes[:, 0]))]
# adjust subplots:
fg.fig.subplots_adjust(top=0.977,
bottom=0.039,
left=0.036,
right=0.959,
hspace=0.185,
wspace=0.125)
filename = 'PW_{}_climatology.png'.format(means)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_gnss_radiosonde_monthly_means(sound_path=sound_path, path=work_yuval,
times=['2014', '2019'], sample='MS',
gps_station='tela', east_height=5000):
import xarray as xr
from aux_gps import path_glob
import pandas as pd
file = path_glob(sound_path, 'bet_dagan_phys_PW_Tm_Ts_*.nc')
phys = xr.load_dataset(file[0])['PW']
if east_height is not None:
file = path_glob(sound_path, 'bet_dagan_edt_sounding*.nc')
east = xr.load_dataset(file[0])['east_distance']
east = east.resample(sound_time=sample).mean().sel(
Height=east_height, method='nearest')
east_df = east.reset_coords(drop=True).to_dataframe()
if times is not None:
phys = phys.sel(sound_time=slice(*times))
ds = phys.resample(sound_time=sample).mean(
).to_dataset(name='Bet-dagan-radiosonde')
ds = ds.rename({'sound_time': 'time'})
gps = xr.load_dataset(
path / 'GNSS_PW_thresh_50_homogenized.nc')[gps_station]
if times is not None:
gps = gps.sel(time=slice(*times))
ds[gps_station] = gps.resample(time=sample).mean()
df = ds.to_dataframe()
# now plot:
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
# [x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
# for x in axes]
df.columns = ['Bet dagan soundings', '{} GNSS station'.format(gps_station)]
sns.lineplot(data=df, markers=['o', 's'], linewidth=2.0, ax=axes[0])
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = df.iloc[:, 1] - df.iloc[:, 0]
    df_r.name = 'Residual distribution'
sns.lineplot(data=df_r, color='k', marker='o', linewidth=1.5, ax=axes[1])
if east_height is not None:
ax_east = axes[1].twinx()
sns.lineplot(data=east_df, color='red',
marker='x', linewidth=1.5, ax=ax_east)
ax_east.set_ylabel(
'East drift at {} km altitude [km]'.format(east_height / 1000.0))
axes[1].axhline(y=0, color='r')
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return ds
def plot_wetz_example(path=tela_results_path, plot='WetZ', fontsize=16,
save=True):
from aux_gps import path_glob
import matplotlib.pyplot as plt
from gipsyx_post_proc import process_one_day_gipsyx_output
filepath = path_glob(path, 'tela*_smoothFinal.tdp')[3]
if plot is None:
df, meta = process_one_day_gipsyx_output(filepath, True)
return df, meta
else:
df, meta = process_one_day_gipsyx_output(filepath, False)
if not isinstance(plot, str):
            raise ValueError('pls pick only one field to plot, e.g., WetZ')
error_plot = '{}_error'.format(plot)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
desc = meta['desc'][plot]
unit = meta['units'][plot]
df[plot].plot(ax=ax, legend=False, color='k')
ax.fill_between(df.index, df[plot] - df[error_plot],
df[plot] + df[error_plot], alpha=0.5)
ax.grid()
# ax.set_title('{} from station TELA in {}'.format(
# desc, df.index[100].strftime('%Y-%m-%d')))
ax.set_ylabel('WetZ [{}]'.format(unit), fontsize=fontsize)
ax.set_xlabel('Time [UTC]', fontsize=fontsize)
ax.tick_params(which='both', labelsize=fontsize)
ax.grid('on')
fig.tight_layout()
filename = 'wetz_tela_daily.png'
    caption('{} from station TELA in {}. Note the error estimation from the GipsyX software (filled).'.format(
        desc, df.index[100].strftime('%Y-%m-%d')))
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_figure_3(path=tela_solutions, year=2004, field='WetZ',
middle_date='11-25', zooms=[10, 3, 0.5], save=True):
from gipsyx_post_proc import analyse_results_ds_one_station
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
dss = xr.open_dataset(path / 'TELA_ppp_raw_{}.nc'.format(year))
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
da = analyse_results_ds_one_station(dss, field=field, plot=False)
fig, axes = plt.subplots(ncols=1, nrows=3, sharex=False, figsize=(16, 10))
for j, ax in enumerate(axes):
start = pd.to_datetime('{}-{}'.format(year, middle_date)
) - pd.Timedelta(zooms[j], unit='D')
end = pd.to_datetime('{}-{}'.format(year, middle_date)
) + pd.Timedelta(zooms[j], unit='D')
daa = da.sel(time=slice(start, end))
        for ppp in ds:
            ds[ppp].plot(ax=ax, linewidth=3.0)
daa.plot.line(marker='.', linewidth=0., ax=ax, color='k')
axes[j].set_xlim(start, end)
axes[j].set_ylim(daa.min() - 0.5, daa.max() + 0.5)
try:
axes[j - 1].axvline(x=start, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
axes[j - 1].axvline(x=end, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
except IndexError:
pass
units = ds.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
ax.grid()
# fig.suptitle(
# '30 hours stitched {} for GNSS station {}'.format(
# desc, sta), fontweight='bold')
fig.tight_layout()
caption('20, 6 and 1 days of zenith wet delay in 2004 from the TELA GNSS station for the top, middle and bottom figures respectively. The colored segments represent daily solutions while the black dots represent smoothed mean solutions.')
filename = 'zwd_tela_discon_panel.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
# fig.subplots_adjust(top=0.95)
return axes
def plot_figure_3_1(path=work_yuval, data='zwd'):
import xarray as xr
from aux_gps import plot_tmseries_xarray
from PW_stations import load_gipsyx_results
if data == 'zwd':
tela = load_gipsyx_results('tela', sample_rate='1H', plot_fields=None)
label = 'ZWD [cm]'
title = 'Zenith wet delay derived from GPS station TELA'
ax = plot_tmseries_xarray(tela, 'WetZ')
elif data == 'pw':
ds = xr.open_dataset(path / 'GNSS_hourly_PW.nc')
tela = ds['tela']
label = 'PW [mm]'
title = 'Precipitable water derived from GPS station TELA'
ax = plot_tmseries_xarray(tela)
ax.set_ylabel(label)
ax.set_xlim('1996-02', '2019-07')
ax.set_title(title)
ax.set_xlabel('')
ax.figure.tight_layout()
return ax
def plot_ts_tm(path=sound_path, model='TSEN',
times=['2007', '2019'], fontsize=14, save=True):
"""plot ts-tm relashonship"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
from PW_stations import ML_Switcher
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from sounding_procedures import get_field_from_radiosonde
models_dict = {'LR': 'Linear Regression',
'TSEN': 'Theil–Sen Regression'}
# sns.set_style('whitegrid')
pds = xr.Dataset()
Ts = get_field_from_radiosonde(path=sound_path, field='Ts',
data_type='phys', reduce=None, times=times,
plot=False)
Tm = get_field_from_radiosonde(path=sound_path, field='Tm',
data_type='phys', reduce='min', times=times,
plot=False)
pds['Tm'] = Tm
pds['Ts'] = Ts
pds = pds.dropna('sound_time')
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
pds.plot.scatter(
x='Ts',
y='Tm',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.grid()
ml = ML_Switcher()
fit_model = ml.pick_model(model)
X = pds.Ts.values.reshape(-1, 1)
y = pds.Tm.values
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
ax.plot(X, predict, c='r')
bevis_tm = pds.Ts.values * 0.72 + 70.0
ax.plot(pds.Ts.values, bevis_tm, c='purple')
ax.legend(['{} ({:.2f}, {:.2f})'.format(models_dict.get(model),
coef, inter), 'Bevis 1992 et al. (0.72, 70.0)'], fontsize=fontsize-4)
# ax.set_xlabel('Surface Temperature [K]')
# ax.set_ylabel('Water Vapor Mean Atmospheric Temperature [K]')
ax.set_xlabel('Ts [K]', fontsize=fontsize)
ax.set_ylabel('Tm [K]', fontsize=fontsize)
ax.set_ylim(265, 320)
ax.tick_params(labelsize=fontsize)
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = predict - y
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", 'edgecolor': 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(y, predict))
print(rmean, rmse)
r2 = r2_score(y, predict)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[K]')
textstr = '\n'.join(['n={}'.format(pds.Ts.size),
'RMSE: ', '{:.2f} K'.format(rmse)]) # ,
# r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# axin1.text(0.2, 0.9, 'n={}'.format(pds.Ts.size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.78, 0.9, 'RMSE: {:.2f} K'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
axin1.set_xlim(-15, 15)
fig.tight_layout()
filename = 'Bet_dagan_ts_tm_fit_{}-{}.png'.format(times[0], times[1])
    caption('Water vapor mean temperature (Tm) vs. surface temperature (Ts) of the Bet-Dagan radiosonde station. An ordinary least squares linear fit (red) yields a residual distribution with an RMSE of 4 K. The Bevis (1992) model is plotted (purple) for comparison.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
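# Hedged example of the two Ts-Tm models compared above: the regression fitted in
# plot_ts_tm (its slope/intercept come out of the fit, so the defaults below are
# just placeholders) versus the fixed Bevis et al. (1992) relation Tm = 0.72*Ts + 70.
def _example_ts_tm_models(ts_kelvin=300.0, fitted_coef=0.72, fitted_inter=70.0):
    tm_bevis = 0.72 * ts_kelvin + 70.0
    tm_fitted = fitted_coef * ts_kelvin + fitted_inter
    return tm_bevis, tm_fitted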
def plot_pw_tela_bet_dagan_scatterplot(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
cats=None,
times=['2007', '2019'], wv_name='pw',
r2=False, fontsize=14,
save=True):
"""plot the PW of Bet-Dagan vs. PW of gps station"""
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# sns.set_style('white')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path, sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
tpw = 'tpw_bet_dagan'
ds = ds[[tpw, 'tela_pw']].dropna('time')
ds = ds.sel(time=slice(*times))
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
ds.plot.scatter(x=tpw,
y='tela_pw',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.plot(ds[tpw], ds[tpw], c='r')
ax.legend(['y = x'], loc='upper right', fontsize=fontsize)
if wv_name == 'pw':
ax.set_xlabel('PWV from Bet-Dagan [mm]', fontsize=fontsize)
ax.set_ylabel('PWV from TELA GPS station [mm]', fontsize=fontsize)
elif wv_name == 'iwv':
ax.set_xlabel(
r'IWV from Bet-Dagan station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.set_ylabel(
r'IWV from TELA GPS station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.grid()
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = ds.tela_pw.values - ds[tpw].values
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", "edgecolor": 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(ds[tpw].values, ds.tela_pw.values))
r2s = r2_score(ds[tpw].values, ds.tela_pw.values)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[mm]')
ax.tick_params(labelsize=fontsize)
if wv_name == 'pw':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse)])
elif wv_name == 'iwv':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(rmse)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
#
# axin1.text(0.2, 0.95, 'n={}'.format(ds[tpw].size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.3, 0.85, 'bias: {:.2f} mm'.format(rmean),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.35, 0.75, 'RMSE: {:.2f} mm'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# fig.suptitle('Precipitable Water comparison for the years {} to {}'.format(*times))
fig.tight_layout()
    caption(
        'PW from TELA GNSS station vs. PW from Bet-Dagan radiosonde station in {}-{}. A 45 degree line is plotted (red) for comparison. Note the skew in the residual distribution with an RMSE of 4.37 mm.'.format(times[0], times[1]))
# fig.subplots_adjust(top=0.95)
filename = 'Bet_dagan_tela_pw_compare_{}-{}.png'.format(times[0], times[1])
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ds
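# Minimal sketch of the bias/RMSE statistics reported in the inset above, on
# synthetic arrays (the real call compares the Bet-Dagan and TELA PWV series):
def _example_bias_rmse():
    import numpy as np
    from sklearn.metrics import mean_squared_error
    reference = np.array([20.0, 25.0, 30.0])
    estimate = np.array([21.0, 24.0, 31.0])
    resid = estimate - reference
    bias = np.mean(resid)
    rmse = np.sqrt(mean_squared_error(reference, estimate))
    return bias, rmse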
def plot_tela_bet_dagan_comparison(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
times=['2007', '2020'], cats=None,
compare='pwv',
save=True):
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
# sns.set_style('whitegrid')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path,
sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
ds = ds.dropna('time')
ds = ds.sel(time=slice(*times))
if compare == 'zwd':
df = ds[['zwd_bet_dagan', 'tela']].to_dataframe()
elif compare == 'pwv':
df = ds[['tpw_bet_dagan', 'tela_pw']].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
df.columns = ['Bet-Dagan soundings', 'TELA GNSS station']
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = df.iloc[:, 0] - df.iloc[:, 1]
    df_r.name = 'Residual distribution'
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if compare == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif compare == 'pwv':
axes[0].set_ylabel('Precipitable Water Vapor [mm]')
axes[1].set_ylabel('Residuals [mm]')
# axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS satation')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate(
'changed sonde type from VIZ MK-II to PTU GPS',
(mdates.date2num(sonde_change_x),
10),
xytext=(
15,
15),
textcoords='offset points',
arrowprops=dict(
arrowstyle='fancy',
color='red'),
color='red')
# axes[1].set_aspect(3)
[x.set_xlim(*[pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
filename = 'Bet_dagan_tela_{}_compare.png'.format(compare)
    caption('Top: zenith wet delay from the Bet-Dagan radiosonde station (blue circles) and from the TELA GNSS station (orange x) in 2007-2019. Bottom: residuals. Note that the residuals become more constrained from 08-2013, probably due to an equipment change.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def plot_israel_map_from_shape_file(gis_path=gis_path):
import geopandas as gpd
agr = gpd.read_file(gis_path/'ISR_agriculture_districts.shp')
isr = gpd.GeoSeries(agr.geometry.unary_union)
isr.crs = agr.crs
isr = isr.to_crs(epsg=4326)
return isr
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None):
"""general nice map for israel, need that to plot stations,
and temperature field on top of it"""
import geopandas as gpd
import contextily as ctx
import seaborn as sns
import cartopy.crs as ccrs
sns.set_style("ticks", rc=rc)
isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
# isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
crs_epsg = ccrs.epsg('3857')
# crs_epsg = ccrs.epsg('2039')
if ax is None:
# fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg},
# figsize=(6, 15))
bounds = isr_with_yosh.geometry.total_bounds
extent = [bounds[0], bounds[2], bounds[1], bounds[3]]
# ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg)
# ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg)
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
else:
isr_with_yosh.plot(alpha=0.0, ax=ax)
ctx.add_basemap(
ax,
source=ctx.providers.Stamen.TerrainBackground,
crs='epsg:4326')
ax.xaxis.set_major_locator(ticker.MaxNLocator(2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_major_formatter(lat_formatter)
ax.xaxis.set_major_formatter(lon_formatter)
ax.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=ticklabelsize)
# scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds)
return ax
def plot_israel_with_stations(gis_path=gis_path, dem_path=dem_path, ims=True,
gps=True, radio=True, terrain=True, alt=False,
ims_names=False, gps_final=False, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import geopandas as gpd
ax = plot_israel_map(gis_path)
station_names = []
legend = []
if ims:
print('getting IMS temperature stations metadata...')
ims_t = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims_t.plot(ax=ax, color='red', edgecolor='black', alpha=0.5)
station_names.append('ims')
legend.append('IMS stations')
if ims_names:
geo_annotate(ax, ims_t.lon, ims_t.lat,
ims_t['name_english'], xytext=(3, 3), fmt=None,
c='k', fw='normal', fs=7, colorupdown=False)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
if gps:
print('getting solved GNSS israeli stations metadata...')
gps_df = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
if gps_final:
to_drop = ['gilb', 'lhav', 'hrmn', 'nizn', 'spir']
gps_final_stations = [x for x in gps_df.index if x not in to_drop]
gps = gps_df.loc[gps_final_stations, :]
gps.plot(ax=ax, color='k', edgecolor='black', marker='s')
gps_stations = [x for x in gps.index]
to_plot_offset = ['gilb', 'lhav']
# [gps_stations.remove(x) for x in to_plot_offset]
gps_normal_anno = gps.loc[gps_stations, :]
# gps_offset_anno = gps.loc[to_plot_offset, :]
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
if alt:
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.alt, xytext=(4, -6), fmt='{:.0f}',
c='k', fw='bold', fs=9, colorupdown=False)
# geo_annotate(ax, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('gps')
legend.append('GNSS stations')
if terrain:
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax.set_xlabel('')
ax.set_ylabel('')
if radio: # plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax, color='black', edgecolor='black',
marker='+')
geo_annotate(ax, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('radio')
legend.append('radiosonde')
if legend:
plt.legend(legend, loc='upper left')
plt.tight_layout()
plt.subplots_adjust(bottom=0.05)
if station_names:
station_names = '_'.join(station_names)
else:
station_names = 'no_stations'
filename = 'israel_map_{}.png'.format(station_names)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_zwd_lapse_rate(path=work_yuval, fontsize=18, model='TSEN', save=True):
from PW_stations import calculate_zwd_altitude_fit
df, zwd_lapse_rate = calculate_zwd_altitude_fit(path=path, model=model,
plot=True, fontsize=fontsize)
    if save:
        filename = 'zwd_lapse_rate.png'
        plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_ims_T_lapse_rate(ims_path=ims_path, dt='2013-10-19T22:00:00',
fontsize=16, save=True):
from aux_gps import path_glob
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# from matplotlib import rc
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# rc('text', usetex=False)
# rc('text',latex.unicode=False)
glob_str = 'IMS_TD_israeli_10mins*.nc'
file = path_glob(ims_path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
time_dim = list(set(ds.dims))[0]
# slice to a starting year(1996?):
ds = ds.sel({time_dim: slice('1996', None)})
# years = sorted(list(set(ds[time_dim].dt.year.values)))
# get coords and alts of IMS stations:
T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])
# T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])
# T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])
print('loading IMS_TD of israeli stations 10mins freq..')
# transform to dataframe and add coords data to df:
tdf = ds.to_dataframe()
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
dt = pd.to_datetime(dt)
# prepare the ims coords and temp df(Tloc_df) and the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(tdf, dt, T_alts, 'auto')
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',
scatter_kws={'color': 'k'}, ax=ax_lapse)
# suptitle = dt.strftime('%Y-%m-%d %H:%M')
ax_lapse.set_xlabel('Altitude [m]', fontsize=fontsize)
ax_lapse.set_ylabel(r'Temperature [$\degree$C]', fontsize=fontsize)
ax_lapse.text(0.5, 0.95, r'Lapse rate: {:.2f} $\degree$C/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize,
transform=ax_lapse.transAxes, color='k')
ax_lapse.grid()
ax_lapse.tick_params(labelsize=fontsize)
# ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
fig.tight_layout()
filename = 'ims_lapse_rate_example.png'
    caption('Temperature vs. altitude at 10 PM on 2013-10-19 for all automated 10 min IMS stations. The lapse rate is calculated using an ordinary least squares linear fit.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax_lapse
def plot_figure_9(hydro_path=hydro_path, gis_path=gis_path, pw_anom=False,
max_flow_thresh=None, wv_name='pw', save=True):
from hydro_procedures import get_hydro_near_GNSS
from hydro_procedures import loop_over_gnss_hydro_and_aggregate
import matplotlib.pyplot as plt
df = get_hydro_near_GNSS(
radius=5,
hydro_path=hydro_path,
gis_path=gis_path,
plot=False)
ds = loop_over_gnss_hydro_and_aggregate(df, pw_anom=pw_anom,
max_flow_thresh=max_flow_thresh,
hydro_path=hydro_path,
work_yuval=work_yuval, ndays=3,
plot=False, plot_all=False)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(10, 6))
for name in names:
ds.mean('station').mean('tide_start')[name].plot.line(
marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.grid()
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x, y, z)
for x, y, z in fmt])
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_xticklabels()]
xlabels = [x.replace('−', '') for x in labels]
ax.set_xticklabels(xlabels)
fig.canvas.draw()
if wv_name == 'pw':
if pw_anom:
ax.set_ylabel('PW anomalies [mm]')
else:
ax.set_ylabel('PW [mm]')
elif wv_name == 'iwv':
if pw_anom:
ax.set_ylabel(r'IWV anomalies [kg$\cdot$m$^{-2}$]')
else:
ax.set_ylabel(r'IWV [kg$\cdot$m$^{-2}$]')
fig.tight_layout()
# if pw_anom:
# title = 'Mean PW anomalies for tide stations near all GNSS stations'
# else:
# title = 'Mean PW for tide stations near all GNSS stations'
# if max_flow_thresh is not None:
# title += ' (max_flow > {} m^3/sec)'.format(max_flow_thresh)
# ax.set_title(title)
if pw_anom:
filename = 'hydro_tide_lag_pw_anom.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
else:
filename = 'hydro_tide_lag_pw.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def produce_table_1(removed=['hrmn', 'nizn', 'spir'], merged={'klhv': ['klhv', 'lhav'],
'mrav': ['gilb', 'mrav']}, add_location=False,
scope='annual', remove_distance=True):
"""for scope='diurnal' use removed=['hrmn'], add_location=True
and remove_distance=False"""
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
sites = group_sites_to_xarray(upper=False, scope=scope)
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
new = sites.T.values.ravel()
if scope == 'annual':
new = [x for x in new.astype(str) if x != 'nan']
df_gnss = df_gnss.reindex(new)
df_gnss['ID'] = df_gnss.index.str.upper()
pd.options.display.float_format = '{:.2f}'.format
df = df_gnss[['name', 'ID', 'lat', 'lon', 'alt', 'distance']]
df['alt'] = df['alt'].map('{:,.0f}'.format)
df['distance'] = df['distance'].astype(int)
cols = ['GNSS Station name', 'Station ID', 'Latitude [N]',
'Longitude [E]', 'Altitude [m a.s.l]', 'Distance from shore [km]']
df.columns = cols
if scope != 'annual':
df.loc['spir', 'GNSS Station name'] = 'Sapir'
if remove_distance:
df = df.iloc[:, 0:-1]
if add_location:
groups = group_sites_to_xarray(upper=False, scope=scope)
coastal = groups.sel(group='coastal').values
coastal = coastal[~pd.isnull(coastal)]
highland = groups.sel(group='highland').values
highland = highland[~pd.isnull(highland)]
eastern = groups.sel(group='eastern').values
eastern = eastern[~pd.isnull(eastern)]
df.loc[coastal, 'Location'] = 'Coastal'
df.loc[highland, 'Location'] = 'Highland'
df.loc[eastern, 'Location'] = 'Eastern'
if removed is not None:
df = df.loc[[x for x in df.index if x not in removed], :]
if merged is not None:
return df
print(df.to_latex(index=False))
return df
def produce_table_stats(thresh=50, add_location=True, add_height=True):
"""add plot sd to height with se_sd errorbars"""
from PW_stations import produce_pw_statistics
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
import xarray as xr
    sites = group_sites_to_xarray(upper=False, scope='annual')
    new = sites.T.values.ravel()
    new = [x for x in new.astype(str) if x != 'nan']
pw_mm = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
pw_mm = pw_mm[new]
df = produce_pw_statistics(
thresh=thresh, resample_to_mm=False, pw_input=pw_mm)
if add_location:
cols = [x for x in df.columns]
cols.insert(1, 'Location')
gr_df = sites.to_dataframe('sites')
location = [gr_df[gr_df == x].dropna().index.values.item()[
1].title() for x in new]
df['Location'] = location
df = df[cols]
if add_height:
cols = [x for x in df.columns]
if add_location:
cols.insert(2, 'Height [m a.s.l]')
else:
cols.insert(1, 'Height [m a.s.l]')
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=False)
# pd.options.display.float_format = '{:.2f}'.format
df['Height [m a.s.l]'] = df_gnss['alt'].map('{:.0f}'.format)
df = df[cols]
print(df.to_latex(index=False))
return df
def plot_pwv_longterm_trend(path=work_yuval, model_name='LR', save=True,
fontsize=16, add_era5=True):
import matplotlib.pyplot as plt
from aux_gps import linear_fit_using_scipy_da_ts
# from PW_stations import ML_Switcher
import xarray as xr
from aux_gps import anomalize_xr
"""TSEN and LR for linear fit"""
# load GNSS Israel:
# pw = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_mean = pw_anoms.to_array('station').mean('station')
pw_std = pw_anoms.to_array('station').std('station')
pw_weights = 1 / pw_anoms.to_array('station').count('station')
# add ERA5:
era5 = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_anoms = era5_anoms.sel(time=slice(
pw_mean.time.min(), pw_mean.time.max()))
era5_mean = era5_anoms.to_array('station').mean('station')
era5_std = era5_anoms.to_array('station').std('station')
# init linear models
# ml = ML_Switcher()
# model = ml.pick_model(model_name)
if add_era5:
fig, ax = plt.subplots(2, 1, figsize=(15, 7.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=pw_weights)
pwln = pw_mean.plot(ax=ax[0], color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax[0], color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_lo.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax[0].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[0].grid()
ax[0].set_xlabel('')
ax[0].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[0].tick_params(labelsize=fontsize)
trend1, trend_hi1, trend_lo1, slope1, slope_hi1, slope_lo1 = linear_fit_using_scipy_da_ts(era5_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=era5_std)
era5ln = era5_mean.plot(ax=ax[1], color='k', marker='o', linewidth=1.5)
trendln1 = trend1.plot(ax=ax[1], color='r', linewidth=2)
trend_hi1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_lo1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope1, slope_lo1, slope_hi1)
handles = era5ln+trendln1
labels = ['ERA5-mean']
labels.append(trend_label)
ax[1].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[1].grid()
ax[1].set_xlabel('')
ax[1].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[1].tick_params(labelsize=fontsize)
else:
fig, ax = plt.subplots(1, 1, figsize=(15, 5.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None)
pwln = pw_mean.plot(ax=ax, color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax, color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax, linewidth=1.5)
trend_lo.plot.line('r--', ax=ax, linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax.legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
fig.suptitle('PWV mean anomalies and linear trend',
fontweight='bold', fontsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_mean_trend_{}.png'.format(model_name)
plt.savefig(savefig_path / filename, orientation='portrait')
return ax
def plot_trend_filled_pwv_and_era5_barh_plot(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
from PW_stations import process_mkt_from_dataset
import pandas as pd
import seaborn as sns
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
era5 = era5[[x for x in era5 if x in gnss]]
df_gnss = process_mkt_from_dataset(
gnss,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_gnss = add_location_to_GNSS_stations_dataframe(df_gnss)
df_gnss['sig'] = df_gnss['p'].astype(float) <= 0.05
df_era5 = process_mkt_from_dataset(
era5,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_era5 = add_location_to_GNSS_stations_dataframe(df_era5)
df_era5['sig'] = df_era5['p'].astype(float) <= 0.05
df = | pd.concat([df_gnss, df_era5], keys=['GNSS', 'ERA5']) | pandas.concat |
# -----------------------------------------------------------------------------
# WSDM Cup 2017 Classification and Evaluation
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import itertools
import logging
import pandas as pd
from sklearn import ensemble
from sklearn.externals.joblib import Parallel, delayed
import config
from src import evaluationutils
_logger = logging.getLogger()
########################################################################
# Feature Ranking
########################################################################
def rank_features(training, validation):
_logger.info("Ranking features...")
metrics = _compute_metrics_for_single_features(training, validation)
group_metrics = _compute_metrics_for_feature_groups(training, validation)
metrics = pd.concat([metrics, group_metrics], axis=0)
_output_sorted_by_group(
validation.get_time_label(), validation.get_system_name(),
metrics, validation.get_group_names(), validation.get_subgroup_names())
_logger.info("Ranking features... done.")
def _compute_metrics_for_single_features(training, validation):
"""Return a Pandas data frame with metrics for every single feature."""
arguments = []
for feature in validation.get_features():
# each feature name is a tuple itself and
# here we take the last element of this tuple
training2 = training.select_feature(feature[-1])
validation2 = validation.select_feature(feature[-1])
argument = (training2, validation2, feature, )
arguments.append(argument)
result_list = Parallel(n_jobs=config.FEATURE_RANKING_N_JOBS,
backend='multiprocessing')(
delayed(_compute_feature_metrics_star)(x) for x in arguments)
result = pd.concat(result_list, axis=0)
return result
def _compute_metrics_for_feature_groups(training, validation):
arguments = []
for subgroup in validation.get_subgroups():
# each feature name is a tuple itself and here we take the last
# element of this tuple
training2 = training.select_subgroup(subgroup[-1])
validation2 = validation.select_subgroup(subgroup[-1])
argument = (training2, validation2, subgroup + ('ALL', ), )
arguments.append(argument)
for group in validation.get_groups():
training2 = training.select_group(group)
validation2 = validation.select_group(group)
argument = (training2, validation2, (group, 'ALL', 'ALL'),)
arguments.append(argument)
result_list = Parallel(n_jobs=config.FEATURE_RANKING_N_JOBS,
backend='multiprocessing')(
delayed(_compute_feature_metrics_star)(x) for x in arguments)
result = | pd.concat(result_list, axis=0) | pandas.concat |
# standard library imports
import os
import datetime
import re
import math
import copy
import collections
from functools import wraps
from itertools import combinations
import warnings
import pytz
import importlib
# anaconda distribution defaults
import dateutil
import numpy as np
import pandas as pd
# anaconda distribution defaults
# statistics and machine learning imports
import statsmodels.formula.api as smf
from scipy import stats
# from sklearn.covariance import EllipticEnvelope
import sklearn.covariance as sk_cv
# anaconda distribution defaults
# visualization library imports
import matplotlib.pyplot as plt
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.palettes import Category10, Category20c, Category20b
from bokeh.layouts import gridplot
from bokeh.models import Legend, HoverTool, tools, ColumnDataSource
# visualization library imports
hv_spec = importlib.util.find_spec('holoviews')
if hv_spec is not None:
import holoviews as hv
from holoviews.plotting.links import DataLink
else:
warnings.warn('Some plotting functions will not work without the '
'holoviews package.')
# pvlib imports
pvlib_spec = importlib.util.find_spec('pvlib')
if pvlib_spec is not None:
from pvlib.location import Location
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.pvsystem import retrieve_sam
from pvlib.modelchain import ModelChain
from pvlib.clearsky import detect_clearsky
else:
warnings.warn('Clear sky functions will not work without the '
'pvlib package.')
plot_colors_brewer = {'real_pwr': ['#2b8cbe', '#7bccc4', '#bae4bc', '#f0f9e8'],
'irr-poa': ['#e31a1c', '#fd8d3c', '#fecc5c', '#ffffb2'],
'irr-ghi': ['#91003f', '#e7298a', '#c994c7', '#e7e1ef'],
'temp-amb': ['#238443', '#78c679', '#c2e699', '#ffffcc'],
'temp-mod': ['#88419d', '#8c96c6', '#b3cde3', '#edf8fb'],
'wind': ['#238b45', '#66c2a4', '#b2e2e2', '#edf8fb']}
met_keys = ['poa', 't_amb', 'w_vel', 'power']
# The search strings for types cannot be duplicated across types.
type_defs = collections.OrderedDict([
('irr', [['irradiance', 'irr', 'plane of array', 'poa', 'ghi',
'global', 'glob', 'w/m^2', 'w/m2', 'w/m', 'w/'],
(-10, 1500)]),
('temp', [['temperature', 'temp', 'degrees', 'deg', 'ambient',
'amb', 'cell temperature', 'TArray'],
(-49, 127)]),
('wind', [['wind', 'speed'],
(0, 18)]),
('pf', [['power factor', 'factor', 'pf'],
(-1, 1)]),
('op_state', [['operating state', 'state', 'op', 'status'],
(0, 10)]),
('real_pwr', [['real power', 'ac power', 'e_grid'],
(-1000000, 1000000000000)]), # set to very lax bounds
('shade', [['fshdbm', 'shd', 'shade'], (0, 1)]),
('pvsyt_losses', [['IL Pmax', 'IL Pmin', 'IL Vmax', 'IL Vmin'],
(-1000000000, 100000000)]),
('index', [['index'], ('', 'z')])])
sub_type_defs = collections.OrderedDict([
('ghi', [['sun2', 'global horizontal', 'ghi', 'global',
'GlobHor']]),
('poa', [['sun', 'plane of array', 'poa', 'GlobInc']]),
('amb', [['TempF', 'ambient', 'amb']]),
('mod', [['Temp1', 'module', 'mod', 'TArray']]),
('mtr', [['revenue meter', 'rev meter', 'billing meter', 'meter']]),
('inv', [['inverter', 'inv']])])
irr_sensors_defs = {'ref_cell': [['reference cell', 'reference', 'ref',
'referance', 'pvel']],
'pyran': [['pyranometer', 'pyran']],
'clear_sky':[['csky']]}
columns = ['pts_after_filter', 'pts_removed', 'filter_arguments']
def update_summary(func):
"""
Todo
----
not in place
Check if summary is updated when function is called with inplace=False.
It should not be.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
pts_before = self.df_flt.shape[0]
if pts_before == 0:
pts_before = self.df.shape[0]
self.summary_ix.append((self.name, 'count'))
self.summary.append({columns[0]: pts_before,
columns[1]: 0,
columns[2]: 'no filters'})
ret_val = func(self, *args, **kwargs)
arg_str = args.__repr__()
lst = arg_str.split(',')
arg_lst = [item.strip("'() ") for item in lst]
# arg_lst_one = arg_lst[0]
# if arg_lst_one == 'das' or arg_lst_one == 'sim':
# arg_lst = arg_lst[1:]
# arg_str = ', '.join(arg_lst)
kwarg_str = kwargs.__repr__()
kwarg_str = kwarg_str.strip('{}')
if len(arg_str) == 0 and len(kwarg_str) == 0:
arg_str = 'no arguments'
elif len(arg_str) == 0:
arg_str = kwarg_str
else:
arg_str = arg_str + ', ' + kwarg_str
pts_after = self.df_flt.shape[0]
pts_removed = pts_before - pts_after
self.summary_ix.append((self.name, func.__name__))
self.summary.append({columns[0]: pts_after,
columns[1]: pts_removed,
columns[2]: arg_str})
if pts_after == 0:
warnings.warn('The last filter removed all data! '
'Calling additional filtering or visualization '
'methods that reference the df_flt attribute will '
'raise an error.')
return ret_val
return wrapper
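# Illustrative sketch (not part of the original module): a minimal stand-in object
# showing how update_summary records point counts around a decorated filter method.
# _FakeCapData and drop_first_row are names invented for this example only.
class _FakeCapData:
    def __init__(self, df):
        self.df = df
        self.df_flt = df.copy()
        self.name = 'example'
        self.summary = []
        self.summary_ix = []

    @update_summary
    def drop_first_row(self):
        # any filtering method that shrinks df_flt shows up in the summary
        self.df_flt = self.df_flt.iloc[1:, :]
# e.g. _FakeCapData(pd.DataFrame({'a': [1, 2, 3]})).drop_first_row() leaves a
# two-row df_flt and one summary entry recording a single removed point.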
def cntg_eoy(df, start, end):
"""
    Shifts data before or after the new year to form a contiguous time period.
    This function shifts data from the end of the year a year back, or data from
    the beginning of the year a year forward, to create a contiguous time period.
Intended to be used on historical typical year data.
If start date is in dataframe, then data at the beginning of the year will
be moved ahead one year. If end date is in dataframe, then data at the end
of the year will be moved back one year.
cntg (contiguous); eoy (end of year)
Parameters
----------
df: pandas DataFrame
Dataframe to be adjusted.
start: pandas Timestamp
Start date for time period.
end: pandas Timestamp
End date for time period.
Todo
----
Need to test and debug this for years not matching.
"""
if df.index[0].year == start.year:
df_beg = df.loc[start:, :]
df_end = df.copy()
df_end.index = df_end.index + pd.DateOffset(days=365)
df_end = df_end.loc[:end, :]
elif df.index[0].year == end.year:
df_end = df.loc[:end, :]
df_beg = df.copy()
df_beg.index = df_beg.index - pd.DateOffset(days=365)
df_beg = df_beg.loc[start:, :]
df_return = pd.concat([df_beg, df_end], axis=0)
ix_ser = df_return.index.to_series()
df_return['index'] = ix_ser.apply(lambda x: x.strftime('%m/%d/%Y %H %M'))
return df_return
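# Hedged usage sketch (added for illustration; the dates and random data below are
# made up): stitch Nov-Dec of a typical year onto Jan-Feb shifted forward one year,
# so the period Nov 1990 through Feb 1991 becomes contiguous.
def _example_cntg_eoy():
    idx = pd.date_range('1990-01-01', '1990-12-31 23:00', freq='H')
    df = pd.DataFrame({'poa': np.random.rand(len(idx))}, index=idx)
    start = pd.Timestamp('1990-11-01 00:00')
    end = pd.Timestamp('1991-02-28 23:00')
    return cntg_eoy(df, start, end)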
def spans_year(start_date, end_date):
"""
Returns boolean indicating if dates passes are in the same year.
Parameters
----------
start_date: pandas Timestamp
end_date: pandas Timestamp
"""
if start_date.year != end_date.year:
return True
else:
return False
def wrap_seasons(df, freq):
"""
Rearrange an 8760 so a quarterly groupby will result in seasonal groups.
Parameters
----------
df : DataFrame
Dataframe to be rearranged.
freq : str
        String pandas offset alias to specify aggregation frequency
for reporting condition calculation.
Returns
-------
DataFrame
Todo
----
Write unit test
BQ-NOV vs BQS vs QS
Need to review if BQ is the correct offset alias vs BQS or QS.
"""
check_freqs = ['BQ-JAN', 'BQ-FEB', 'BQ-APR', 'BQ-MAY', 'BQ-JUL',
'BQ-AUG', 'BQ-OCT', 'BQ-NOV']
mnth_int = {'JAN': 1, 'FEB': 2, 'APR': 4, 'MAY': 5, 'JUL': 7,
'AUG': 8, 'OCT': 10, 'NOV': 11}
if freq in check_freqs:
        warnings.warn('DataFrame index adjusted to be continuous through the new '
                      'year, but not returned or set to an attribute for the user. '
                      'This is not an issue if using RCs with '
                      'predict_capacities.')
if isinstance(freq, str):
mnth = mnth_int[freq.split('-')[1]]
else:
mnth = freq.startingMonth
year = df.index[0].year
mnths_eoy = 12 - mnth
mnths_boy = 3 - mnths_eoy
if int(mnth) >= 10:
str_date = str(mnths_boy) + '/' + str(year)
else:
str_date = str(mnth) + '/' + str(year)
tdelta = df.index[1] - df.index[0]
date_to_offset = df.loc[str_date].index[-1].to_pydatetime()
start = date_to_offset + tdelta
end = date_to_offset + pd.DateOffset(years=1)
if mnth < 8 or mnth >= 10:
df = cntg_eoy(df, start, end)
else:
df = cntg_eoy(df, end, start)
return df
else:
return df
def perc_wrap(p):
def numpy_percentile(x):
return np.percentile(x.T, p, interpolation='nearest')
return numpy_percentile
def perc_bounds(perc):
"""
    perc : float or tuple
Percentage or tuple of percentages used to filter around reporting
irradiance in the irrRC_balanced function. Required argument when
irr_bal is True.
"""
if isinstance(perc, tuple):
perc_low = perc[0] / 100
perc_high = perc[1] / 100
else:
perc_low = perc / 100
perc_high = perc / 100
low = 1 - (perc_low)
high = 1 + (perc_high)
return (low, high)
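# Quick sketch of the helper above: a symmetric 20% band and an asymmetric
# (10%, 30%) band expressed as multiplier bounds.
def _example_perc_bounds():
    return perc_bounds(20), perc_bounds((10, 30))  # ((0.8, 1.2), (0.9, 1.3))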
def perc_difference(x, y):
"""
Calculate percent difference of two values.
"""
if x == y == 0:
return 0
else:
return abs(x - y) / ((x + y) / 2)
def check_all_perc_diff_comb(series, perc_diff):
"""
Check series for pairs of values with percent difference above perc_diff.
Calculates the percent difference between all combinations of two values in
the passed series and checks if all of them are below the passed perc_diff.
Parameters
----------
series : pd.Series
Pandas series of values to check.
perc_diff : float
Percent difference threshold value as decimal i.e. 5% is 0.05.
Returns
-------
bool
"""
c = combinations(series.__iter__(), 2)
return all([perc_difference(x, y) < perc_diff for x, y in c])
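# Minimal sketch with made-up sensor readings: three values within roughly 1% of
# each other pass a 5% pairwise-difference check.
def _example_check_all_perc_diff_comb():
    readings = pd.Series([812.0, 815.0, 810.0])
    return check_all_perc_diff_comb(readings, 0.05)  # True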
def sensor_filter(df, perc_diff):
"""
Check dataframe for rows with inconsistent values.
Applies check_all_perc_diff_comb function along rows of passed dataframe.
Parameters
----------
df : pandas DataFrame
perc_diff : float
Percent difference as decimal.
"""
if df.shape[1] >= 2:
bool_ser = df.apply(check_all_perc_diff_comb, perc_diff=perc_diff,
axis=1)
return df[bool_ser].index
elif df.shape[1] == 1:
return df.index
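# Illustrative sketch (column names and values are assumptions): the middle row,
# where the two POA sensors disagree by well over 5%, is excluded from the
# returned index.
def _example_sensor_filter():
    df = pd.DataFrame({'poa1': [800.0, 640.0, 795.0],
                       'poa2': [805.0, 820.0, 790.0]})
    return sensor_filter(df, 0.05)  # keeps rows 0 and 2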
def flt_irr(df, irr_col, low, high, ref_val=None):
"""
Top level filter on irradiance values.
Parameters
----------
df : DataFrame
Dataframe to be filtered.
irr_col : str
String that is the name of the column with the irradiance data.
low : float or int
Minimum value as fraction (0.8) or absolute 200 (W/m^2)
high : float or int
Max value as fraction (1.2) or absolute 800 (W/m^2)
ref_val : float or int
Must provide arg when min/max are fractions
Returns
-------
DataFrame
"""
if ref_val is not None:
low *= ref_val
high *= ref_val
df_renamed = df.rename(columns={irr_col: 'poa'})
flt_str = '@low <= ' + 'poa' + ' <= @high'
indx = df_renamed.query(flt_str).index
return df.loc[indx, :]
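# Hedged example using an assumed 'GlobInc' column: keep rows within +/-20% of a
# 500 W/m^2 reference irradiance.
def _example_flt_irr():
    df = pd.DataFrame({'GlobInc': [350.0, 450.0, 550.0, 700.0]})
    return flt_irr(df, 'GlobInc', 0.8, 1.2, ref_val=500.0)  # keeps 450 and 550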
def filter_grps(grps, rcs, irr_col, low, high, **kwargs):
"""
    Apply irradiance filter around passed reporting irradiances to groupby.
For each group in the grps argument the irradiance is filtered by a
percentage around the reporting irradiance provided in rcs.
Parameters
----------
grps : pandas groupby
Groupby object with time groups (months, seasons, etc.).
rcs : pandas DataFrame
Dataframe of reporting conditions. Use the rep_cond method to generate
a dataframe for this argument.
**kwargs
Passed to pandas Grouper to control label and closed side of intervals.
        See pandas Grouper documentation for details. Default is left labeled
and left closed.
Returns
-------
pandas groupby
"""
flt_dfs = []
freq = list(grps.groups.keys())[0].freq
for grp_name, grp_df in grps:
ref_val = rcs.loc[grp_name, 'poa']
grp_df_flt = flt_irr(grp_df, irr_col, low, high, ref_val=ref_val)
flt_dfs.append(grp_df_flt)
df_flt = pd.concat(flt_dfs)
df_flt_grpby = df_flt.groupby(pd.Grouper(freq=freq, **kwargs))
return df_flt_grpby
def irrRC_balanced(df, low, high, irr_col='GlobInc', plot=False):
"""
Iteratively calculates reporting irradiance that achieves 40/60 balance.
    This function is intended to implement a strict interpretation of common
contract language that specifies the reporting irradiance be determined by
finding the irradiance that results in a balance of points within a
+/- percent range of the reporting irradiance. This function
iterates to a solution for the reporting irradiance by calculating the
    irradiance that has 10 data points in the filtered dataset above it, then
filtering for a percentage of points around that irradiance, calculating
what percentile the reporting irradiance is in. This procedure continues
until 40% of the points in the filtered dataset are above the calculated
reporting irradiance.
Parameters
----------
df: pandas DataFrame
DataFrame containing irradiance data for calculating the irradiance
reporting condition.
low: float
Bottom value for irradiance filter, usually between 0.5 and 0.8.
high: float
Top value for irradiance filter, usually between 1.2 and 1.5.
irr_col: str
String that is the name of the column with the irradiance data.
plot: bool, default False
Plots graphical view of algorithim searching for reporting irradiance.
Useful for troubleshooting or understanding the method.
Returns
-------
Tuple
Float reporting irradiance and filtered dataframe.
"""
if plot:
irr = df[irr_col].values
x = np.ones(irr.shape[0])
plt.plot(x, irr, 'o', markerfacecolor=(0.5, 0.7, 0.5, 0.1))
plt.ylabel('irr')
x_inc = 1.01
vals_above = 10
perc = 100.
pt_qty = 0
loop_cnt = 0
pt_qty_array = []
# print('--------------- MONTH START --------------')
while perc > 0.6 or pt_qty < 50:
# print('####### LOOP START #######')
df_count = df.shape[0]
df_perc = 1 - (vals_above / df_count)
# print('in percent: {}'.format(df_perc))
irr_RC = (df[irr_col].agg(perc_wrap(df_perc * 100)))
# print('ref irr: {}'.format(irr_RC))
flt_df = flt_irr(df, irr_col, low, high, ref_val=irr_RC)
# print('number of vals: {}'.format(df.shape))
pt_qty = flt_df.shape[0]
# print('flt pt qty: {}'.format(pt_qty))
perc = stats.percentileofscore(flt_df[irr_col], irr_RC) / 100
# print('out percent: {}'.format(perc))
vals_above += 1
pt_qty_array.append(pt_qty)
if perc <= 0.6 and pt_qty <= pt_qty_array[loop_cnt - 1]:
break
loop_cnt += 1
if plot:
x_inc += 0.02
y1 = irr_RC * low
y2 = irr_RC * high
plt.plot(x_inc, irr_RC, 'ro')
plt.plot([x_inc, x_inc], [y1, y2])
if plot:
plt.show()
return(irr_RC, flt_df)
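# Usage sketch only (random uniform data is not a realistic irradiance
# distribution; it simply exercises the call signature and the iteration).
def _example_irrRC_balanced():
    df = pd.DataFrame({'GlobInc': np.random.uniform(200, 1000, 500)})
    irr_rc, flt_df = irrRC_balanced(df, 0.8, 1.2, irr_col='GlobInc', plot=False)
    return irr_rc, flt_df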
def fit_model(df, fml='power ~ poa + I(poa * poa) + I(poa * t_amb) + I(poa * w_vel) - 1'):
"""
Fits linear regression using statsmodels to dataframe passed.
Dataframe must be first argument for use with pandas groupby object
apply method.
Parameters
----------
df : pandas dataframe
fml : str
        Formula to fit; refer to the statsmodels and patsy documentation for the format.
Default is the formula in ASTM E2848.
Returns
-------
Statsmodels linear model regression results wrapper object.
"""
mod = smf.ols(formula=fml, data=df)
reg = mod.fit()
return reg
def predict(regs, rcs):
"""
Calculates predicted values for given linear models and predictor values.
Evaluates the first linear model in the iterable with the first row of the
predictor values in the dataframe. Passed arguments must be aligned.
Parameters
----------
regs : iterable of statsmodels regression results wrappers
rcs : pandas dataframe
Dataframe of predictor values used to evaluate each linear model.
The column names must match the strings used in the regression formuala.
Returns
-------
Pandas series of predicted values.
"""
pred_cap = pd.Series()
for i, mod in enumerate(regs):
RC_df = pd.DataFrame(rcs.iloc[i, :]).T
pred_cap = pred_cap.append(mod.predict(RC_df))
return pred_cap
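# Hedged sketch tying fit_model and predict together on synthetic data; only the
# call pattern matters here, the numbers themselves are arbitrary.
def _example_fit_and_predict():
    df = pd.DataFrame({'power': np.random.uniform(500, 1000, 100),
                       'poa': np.random.uniform(400, 1000, 100),
                       't_amb': np.random.uniform(10, 35, 100),
                       'w_vel': np.random.uniform(0, 10, 100)})
    reg = fit_model(df)  # default ASTM E2848 formula
    rcs = pd.DataFrame({'poa': [800.0], 't_amb': [25.0], 'w_vel': [3.0]})
    return predict([reg], rcs)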
def pred_summary(grps, rcs, allowance, **kwargs):
"""
    Creates a summary table of reporting conditions, predicted capacities, and guaranteed capacities.
This method does not calculate reporting conditions.
Parameters
----------
grps : pandas groupby object
Solar data grouped by season or month used to calculate reporting
conditions. This argument is used to fit models for each group.
rcs : pandas dataframe
Dataframe of reporting conditions used to predict capacities.
allowance : float
        Percent allowance to calculate guaranteed capacity from predicted capacity.
Returns
-------
    Dataframe of reporting conditions, model coefficients, predicted capacities,
    guaranteed capacities, and points in each grouping.
"""
regs = grps.apply(fit_model, **kwargs)
predictions = predict(regs, rcs)
params = regs.apply(lambda x: x.params.transpose())
pt_qty = grps.agg('count').iloc[:, 0]
predictions.index = pt_qty.index
params.index = pt_qty.index
rcs.index = pt_qty.index
predictions.name = 'PredCap'
for rc_col_name in rcs.columns:
for param_col_name in params.columns:
if rc_col_name == param_col_name:
params.rename(columns={param_col_name: param_col_name + '-param'},
inplace=True)
results = | pd.concat([rcs, predictions, params], axis=1) | pandas.concat |
"""
Name : c8_55_merge_business_GDP.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import datetime
import scipy as sp
import numpy as np
import pandas as pd
import pandas_datareader.data as web
#
cycle= | pd.read_pickle("c:/temp/businessCycle.pkl") | pandas.read_pickle |
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import accuracy_score
def generate_random_dataset(size):
""" Generate a random dataset and that follows a quadratic distribution
"""
x = []
y = []
target = []
for i in range(size):
# class zero
x.append(np.round(random.uniform(0, 2.5), 1))
y.append(np.round(random.uniform(0, 20), 1))
target.append(0)
# class one
x.append(np.round(random.uniform(1, 5), 2))
y.append(np.round(random.uniform(20, 25), 2))
target.append(1)
x.append(np.round(random.uniform(3, 5), 2))
y.append(np.round(random.uniform(5, 25), 2))
target.append(1)
df_x = pd.DataFrame(data=x)
df_y = pd.DataFrame(data=y)
df_target = pd.DataFrame(data=target)
data_frame = pd.concat([df_x, df_y], ignore_index=True, axis=1)
data_frame = | pd.concat([data_frame, df_target], ignore_index=True, axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import random
import datetime
from tqdm import tqdm
from tr.core.resources import f1_in_tasks, f1_in_checks
from tr.core.utils import dict_to_list, diff_time_list, get_slots, diff_time_list_peak_season
from tr.core.utils import advance_date, days_between_dates, convert_iso_to_timestamp
from tr.core.utils import look_o_dict, load_pickle, save_pickle
from collections import OrderedDict, defaultdict
def excel_to_book(file_input: str):
print("INFO: parsing xlsx to runtime book")
try:
# returns an ordered dict
book = pd.read_excel(file_input, sheet_name=None)
except Exception as e:
print(e)
print('Error parsing the excel file into a dict book buddy!')
print("INFO: xlsx to runtime book completed")
return book
# Function 4: Given a string, checks whether it encodes a month/year or day interval.
# Returns the time in months if it was a 'Y' or 'M', otherwise 0 is returned.
def preprocessMonths(x):
    # parse only string entries; non-strings fall through and return None
if type(x) is str:
if x[-1] == 'M':
return float(x[0:len(x) - 2])
elif x[-1] == 'Y':
return float(x[0:len(x) - 2]) * 12
else:
return 0
else:
return
# Function 5: Given a string, checks whether it encodes a day interval.
# Returns the amount of days if it was a 'D', otherwise 0 is returned.
def preprocessDays(x):
    # parse only string entries; non-strings fall through and return None
if type(x) is str:
if x[-1] == 'D':
return float(x[0:len(x) - 2])
else:
return 0
else:
return
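# Small sketch of the two interval parsers above; the '<number> <unit>' strings
# mirror the slicing in the functions and are assumptions about the input format.
def _example_preprocess_intervals():
    months = [preprocessMonths(x) for x in ['6 M', '2 Y', '10 D']]  # [6.0, 24.0, 0]
    days = [preprocessDays(x) for x in ['10 D', '6 M']]             # [10.0, 0]
    return months, days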
# Function that changes every non-empty (string) entry to 1; necessary for the tasks-by-block columns
def preprocesstask(x):
    # only string entries are marked with 1; non-strings fall through and return None
if type(x) is str:
return 1
def book_to_kwargs_MPO(book):
print("#########################")
print("INFO: processing from runtime checks book")
""" given an MPO input, compute dict where keys are aircraft ids and the rest
of sheet info is organized by aircraft id """
aircraft_info = get_aircraft_info_MPO(book)
calendar_restrictions = get_restrictions_MPO(book)
# each type of maintenance as several restrictions we will devide in 2
# time and hangar restrictions
m_type_restriction = {}
m_type_restriction = {'time_type': 'day'}
a_time = dict_to_list(calendar_restrictions['A_NOT_ALLOWED']['DATE'])
c_time = diff_time_list(calendar_restrictions['C_NOT_ALLOWED'])
c_peak = diff_time_list_peak_season(calendar_restrictions['C_PEAK'])
all_time = dict_to_list(calendar_restrictions['PUBLIC_HOLIDAYS']['DATE'])
a_resources = {'slots': get_slots(calendar_restrictions['MORE_A_SLOTS'])}
c_resources = {'slots': get_slots(calendar_restrictions['MORE_C_SLOTS'])}
m_type_restriction['a-type'] = {'time': a_time, 'resources': a_resources}
m_type_restriction['c-type'] = {
'time': c_time,
'resources': c_resources,
'c_peak': c_peak,
'c_allowed': c_time
}
m_type_restriction['all'] = {'time': all_time}
end = datetime.datetime(2023, 1, 1, 0, 0)
start_date = pd.to_datetime(book['ADDITIONAL'][2019][1])
end_date = pd.to_datetime(end)
m_type_restriction['start_date'] = start_date
m_type_restriction['end_date'] = end_date
# # all these restrictions will restrict the general calendar
# # for
print("INFO: information from runtime parsed with success")
print("#########################")
return {
'aircraft_info': aircraft_info,
'restrictions': m_type_restriction,
}
def get_restrictions_MPO(book):
print('INFO: gathering restrictions info')
restrictions_info = OrderedDict()
for sheet_name in book.keys():
if 'A/C TAIL' not in book[sheet_name].keys():
# for column_idx in book[sheet_name].keys():
restrictions_info[sheet_name] = book[sheet_name].to_dict()
print('INFO: restrictions info completed')
return restrictions_info
def get_aircraft_info_MPO(book):
print('INFO: gathering aircraft info')
aircraft_info = OrderedDict()
for sheet_name in book.keys():
if 'A/C TAIL' in book[sheet_name].keys():
# create ordered dict to store aircraft info
for _ in range(len(book[sheet_name]['A/C TAIL'])):
a_id = book[sheet_name]['A/C TAIL'][_]
if a_id not in list(aircraft_info.keys()):
aircraft_info[a_id] = OrderedDict()
if sheet_name not in list(aircraft_info[a_id].keys()):
aircraft_info[a_id][sheet_name] = OrderedDict()
# fill the info of other columns, pandas already adds idx to equal
# value columns
for column_idx in book[sheet_name].keys():
if column_idx != 'A/C TAIL':
for _ in range(len(book[sheet_name]['A/C TAIL'])):
a_id = book[sheet_name]['A/C TAIL'][_]
aircraft_info[a_id][sheet_name][column_idx] = book[sheet_name][column_idx][
_]
print('INFO: aircraft info completed')
return aircraft_info
def book_to_kwargs_tasks(book):
print("#########################")
print("INFO: processing from runtime tasks book")
# given an MPO input, compute dict where keys are
# aircraft ids and the rest of sheet info is organized by aircraft id
sheet_name = 'TASK_LIST'
df = book[sheet_name]
# equivalent to Preprocess.py/PreprocessTasks
def process_df(df):
for _ in df.keys():
df[_] = df[_].apply(lambda x: x.strip() if type(x) is str else x)
# df['PER FH'].fillna(0, inplace=True)
# df['PER FC'].fillna(0, inplace=True)
# df['LIMIT FH'].fillna(False, inplace=True)
# df['LIMIT FC'].fillna(False, inplace=True)
# df['LIMIT EXEC DT'].fillna(False, inplace=True)
# df['LAST EXEC FC'].fillna(False, inplace=True)
# df['LAST EXEC FH'].fillna(False, inplace=True)
# df['LAST EXEC DT'].fillna(False, inplace=True)
if not isinstance(df['LIMIT EXEC DT'][0], pd.Timestamp):
df['LIMIT EXEC DT'] = | pd.to_datetime(df['LIMIT EXEC DT'], format='%m/%d/%Y') | pandas.to_datetime |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
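# Note: `price`, `price_wide`, `day_dt` and `seed` are fixtures defined earlier in this
# test module. Judging from the order records and the index asserted below, `price` is
# presumably equivalent to (assumption, shown here only for readability):
#   price = pd.Series([1., 2., 3., 4., 5.],
#                     index=pd.date_range('2020-01-01', periods=5, freq='D'))
# and `price_wide` tiles it into the three columns 'a', 'b' and 'c'.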
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
| pd.Int64Index([0, 0, 1], dtype='int64') | pandas.Int64Index |
import pandas as pd
Names = ["Manhattan", "LaGuardia", "JFK"]
def prepare_data(dt):
dt_str = dt.strftime("%Y-%m-%d")
pkt = pd.date_range(start=dt_str + ' 00:00:00', end=dt_str + ' 23:59:59', periods=72)
dff = pd.DataFrame({'pickup_time': pkt})
dff['pickup_time'] = pd.to_datetime(dff['pickup_time'].dt.strftime("%Y-%m-%d %H"))
dff['location'] = Names * 24
dff['timeofday'] = dff.pickup_time.dt.hour
dff['dayofweek'] = dff.pickup_time.dt.day_name()
dff['dayofmonth'] = dff.pickup_time.dt.day
dff['dayofyear'] = dff.pickup_time.dt.dayofyear
data = dff[['location', 'timeofday', 'dayofweek', 'dayofmonth', 'dayofyear']]
return dff, data
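# Illustrative usage (not part of the original script; `model` is a hypothetical,
# already-fitted regressor with a scikit-learn style predict()):
#   import datetime
#   dff, features = prepare_data(datetime.date(2020, 6, 1))
#   predictions = model.predict(features)
#   display_data(dff, predictions)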
def display_data(orig_data, predictions):
orig_data['trip_count'] = predictions
pickups = orig_data.groupby(by=['pickup_time', ], as_index=False, dropna=False, )['trip_count'].sum()
pickups.columns = ['pickup_time', 'total_trip']
pickups = | pd.merge(left=orig_data, right=pickups, how='inner', on='pickup_time') | pandas.merge |
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
# check whether the string can be converted to a single color
maybe_single_color = _maybe_valid_colors([colors])
# check whether each character can be converted to a color
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
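# Illustrative behaviour of the helper above (a sketch, not part of the module):
#   _get_standard_colors(num_colors=3, color='g')        -> ['g', 'g', 'g']
#   _get_standard_colors(num_colors=3, color=['r', 'g']) -> ['r', 'g', 'r']
#   _get_standard_colors(num_colors=3, colormap='jet')   -> three RGBA tuples sampled
#                                                           evenly from the colormap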
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to break down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
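# Example of how the option store above is meant to be used (a sketch; `plot_params`
# is the object exposed as pandas.plot_params):
#   plot_params['x_compat'] = True        # 'x_compat' is aliased to 'xaxis.compat'
#   with plot_params.use('x_compat', True):
#       df.plot()                         # the option only applies inside this block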
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
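# Usage sketch for radviz (illustrative, mirroring the doctest style used elsewhere in
# this module; assumes a frame with numeric feature columns plus a class column):
# >>> from pandas import read_csv
# >>> from pandas.tools.plotting import radviz
# >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
# >>> radviz(df, 'Name')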
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
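# Usage sketch for andrews_curves (illustrative; uses the same iris frame as the
# parallel_coordinates example further below):
# >>> from pandas import read_csv
# >>> from pandas.tools.plotting import andrews_curves
# >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
# >>> andrews_curves(df, 'Name')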
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
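# Usage sketch for bootstrap_plot (illustrative):
# >>> import numpy as np
# >>> from pandas import Series
# >>> from pandas.tools.plotting import bootstrap_plot
# >>> s = Series(np.random.uniform(size=100))
# >>> bootstrap_plot(s, size=50, samples=500)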
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
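# Usage sketch for lag_plot (illustrative; a sine wave makes the lag-1 structure obvious):
# >>> import numpy as np
# >>> from pandas import Series
# >>> from pandas.tools.plotting import lag_plot
# >>> s = Series(np.sin(np.linspace(0, 10 * np.pi, num=1000)))
# >>> lag_plot(s, lag=1)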
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
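# Usage sketch for autocorrelation_plot (illustrative; a noisy sine wave shows the
# expected decaying correlation against the z95/z99 confidence bands drawn above):
# >>> import numpy as np
# >>> from pandas import Series
# >>> from pandas.tools.plotting import autocorrelation_plot
# >>> s = Series(np.sin(np.arange(1000) / 10.) + np.random.normal(0, 0.1, size=1000))
# >>> autocorrelation_plot(s)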
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color' "
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking the LBYL approach here: by the time
matplotlib raises an exception for non-numeric/non-datetime
xaxis values, plt has already taken several actions, so we
check the index type up front.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
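# Editorial note (hedged, not part of the original source): ScatterPlot is normally reached
# through the public DataFrame.plot API rather than instantiated directly. A minimal sketch,
# where the column names are assumptions used only for illustration:
#   df.plot(kind='scatter', x='col_a', y='col_b', c='col_c', colormap='viridis')
# Passing a column name for c triggers the colorbar branch handled in _make_plot above.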
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
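# Editorial note (hedged): HexBinPlot is likewise driven by the public plotting API. A sketch,
# with assumed column names and gridsize:
#   df.plot(kind='hexbin', x='col_a', y='col_b', C='col_c', gridsize=25)
# When C is omitted, matplotlib bins raw point counts; the colorbar defaults to on, matching
# the cb handling in _make_plot above.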
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
raise ValueError('When stacked is True, each column must be either all positive or all negative. '
'{0} contains both positive and negative values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
# column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
# tsplot resampling may change the data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
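# Editorial note (hedged): the two branches above back the public calls df.plot(kind='bar')
# and df.plot(kind='barh'). A sketch with assumed arguments:
#   df.plot(kind='bar', stacked=True, log=True)
# With stacked=True, _make_plot below feeds cumulative pos_prior/neg_prior offsets into these
# functions as the start argument.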
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [ | com.pprint_thing(key) | pandas.core.common.pprint_thing |
import numpy as np
import pandas as pd
from hics.slice_similarity import continuous_similarity_matrix, categorical_similarity_matrix
from hics.slice_selection import select_by_similarity
class ScoredSlices:
def __init__(self, categorical, continuous, to_keep=5, threshold=None):
self.continuous = {feature: pd.DataFrame(columns=['to_value', 'from_value'])
for feature in continuous}
self.categorical = {feature['name']: pd.DataFrame(columns=feature['values'])
for feature in categorical}
self.scores = pd.Series()
self.to_keep = to_keep
if threshold is None:
self.threshold = ScoredSlices.default_threshold(len(categorical) + len(continuous))
else:
self.threshold = threshold
def add_slices(self, slices):
if isinstance(slices, dict):
self.add_from_dict(slices)
else:
self.add_from_object(slices)
def add_from_object(self, slices):
self.scores = self.scores.append(pd.Series(slices.scores)).sort_values(ascending=False, inplace=False)
for feature, df in slices.continuous.items():
self.continuous[feature] = pd.concat([self.continuous[feature], df], ignore_index=True)
self.continuous[feature] = self.continuous[feature].loc[self.scores.index, :].reset_index(drop=True)
for feature, df in slices.categorical.items():
self.categorical[feature] = pd.concat([self.categorical[feature], df], ignore_index=True)
self.categorical[feature] = self.categorical[feature].loc[self.scores.index, :].reset_index(drop=True)
self.scores.reset_index(drop=True, inplace=True)
def add_from_dict(self, slices):
new_scores = pd.Series(slices['scores'])
self.scores = self.scores.append(new_scores, ignore_index=True).sort_values(ascending=False, inplace=False)
for feature in self.continuous:
content = pd.DataFrame(slices['features'][feature])
self.continuous[feature] = pd.concat([self.continuous[feature], content], ignore_index=True)
self.continuous[feature] = self.continuous[feature].loc[self.scores.index, :].reset_index(drop=True)
for feature in self.categorical:
content = pd.DataFrame(slices['features'][feature], columns=self.categorical[feature].columns)
self.categorical[feature] = | pd.concat([self.categorical[feature], content], ignore_index=True) | pandas.concat |
"""
The aim of this project was to build a classifier on the Titanic Kaggle dataset.
"""
### import libraries
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
# import data preprocessing modules
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelBinarizer
# import model selection modules
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# import classifier modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
# import model evaluation metrics modules
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
### load data
train_data = | pd.read_csv("train.csv") | pandas.read_csv |
"""
Description:
-----------
This script hosts many helper functions to make notebooks cleaner. The hope is to not distract users with ugly code.
"""
import numpy as np
import pandas as pd
import matplotlib.patheffects as path_effects
import matplotlib
import matplotlib.pyplot as plt
#outlines for text
pe1 = [path_effects.withStroke(linewidth=2,
foreground="k")]
pe2 = [path_effects.withStroke(linewidth=2,
foreground="w")]
def show_vals(da,ax):
vals = da.values
x = np.arange(0,vals.shape[0])
y = np.arange(0,vals.shape[1])
X,Y = np.meshgrid(x,y)
X = np.ravel(X)
Y = np.ravel(Y)
V = np.ravel(vals)
for i in np.arange(0,len(X)):
fillstr = np.asarray(np.round(V[i],2),dtype=str)
fillstr = np.char.ljust(fillstr,4,'0')
if np.round(V[i],2) > 0.5:
ax.text(X[i]-0.2,Y[i],fillstr,color='k')
else:
ax.text(X[i]-0.2,Y[i],fillstr,color='w')
return
def draw_zoom_window(ax,a,b):
ax.plot([a,a,a+10,a+10,a],[b,b+10,b+10,b,b],'-k',lw=3)
ax.plot([a,a,a+10,a+10,a],[b,b+10,b+10,b,b],'-',color='dodgerblue',lw=2)
return a,b
def get_right_units_vil(vil):
"""they scaled VIL weird, so this unscales it"""
tmp = np.zeros(vil.shape)
idx = np.where(vil <=5)
tmp[idx] = 0
idx = np.where((vil>5)*(vil <= 18))
tmp[idx] = (vil[idx] -2)/90.66
idx = np.where(vil>18)
tmp[idx] = np.exp((vil[idx] - 83.9)/38.9)
return tmp
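# Worked example (editorial, input values chosen only for illustration): a stored value of 3
# maps to 0, a stored value of 10 falls in the 5 < vil <= 18 branch and maps to
# (10 - 2) / 90.66 ~= 0.088, and stored values above 18 follow exp((vil - 83.9) / 38.9).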
def plot_feature_loc(da,ax,q = [0,1,10,25,50,75,90,99,100]):
""" This will plot representative pixels matching the quantiles given """
vals = np.nanpercentile(da,q)
xs = []
ys = []
for v in vals:
local_idx = np.where(np.round(da.values,1) == np.round(v,1))
if len(local_idx[0]) > 1:
ii = np.random.choice(np.arange(0,len(local_idx[0])),size=1)
xs.append(local_idx[0][ii[0]])
ys.append(local_idx[1][ii[0]])
else:
ii = 0
xs.append(local_idx[0][ii])
ys.append(local_idx[1][ii])
markerlist = ['min','$01$','$10$','$25$','$50$','$75$','$90$','$99$','max']
zlist = list(zip(xs,ys))
for i,(x,y) in enumerate(zlist):
ax.text(y,x,markerlist[i],path_effects=pe2)
return
def adjust_keys(df,keyadd,dask=False,dropevent=False):
if dask:
keys = df.columns
newkeys = []
newkeys.append('dtime')
newkeys = newkeys + list(keys[1:-1]+keyadd)
newkeys.append(keys[-1])
else:
keys = df.keys()
newkeys = list(keys[:-1]+keyadd)
newkeys.append(keys[-1])
df.columns = newkeys
if dropevent:
df = df.drop(columns='event')
if dask:
df['dtime'] = df['dtime'].astype(np.datetime64)
return df
def clear_nan(X,y):
tmp = np.hstack([X,y.reshape([y.shape[0],1])])
df_tmp = | pd.DataFrame(tmp) | pandas.DataFrame |
"""
Auxiliary module for storage and easy calculation of statistics and metrics
that are provided by data generated by other modules, namely `caupo.cluster_tags`
"""
import argparse
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
import ludovico
from caupo.embeddings import get_embedder_function_short_names
VALID_FREQUENCIES = [
'daily',
'weekly',
'monthly',
]
def calculate_valid_entries(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, finds amount of valid entries"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
data = data.loc[data["frequency"] == frequency]
data["valid_entries"] = data["sil_score"].apply(lambda x: "NaN" if str(x) == "None" else x).astype("float32")
data = data.dropna()
grouped_data = data[["algorithm", "embedder", "valid_entries"]].groupby(["algorithm", "embedder"])
return grouped_data.count()
def calculate_average_n_clusters(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, finds average n_clusters data for a given frequency"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
data = data.loc[data["frequency"] == frequency]
data = data.loc[data["n_clusters"] != "None"]
data = data.loc[data["sil_score"] != "None"]
data["n_clusters"] = data["n_clusters"].astype("int")
grouped_data = data[["algorithm", "embedder", "n_clusters"]].groupby(["algorithm", "embedder"])
return grouped_data.mean().sort_values(by=["n_clusters"], ascending=False)
def calculate_average_noise_percentage(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, finds average noise_percentage data for a given frequency"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
data = data.loc[data["frequency"] == frequency]
data = data.loc[data["noise_percentage"] != "None"]
data = data.loc[data["sil_score"] != "None"]
data["noise_percentage"] = data["noise_percentage"].astype("float32") * 100.0
grouped_data = data[["algorithm", "embedder", "noise_percentage"]].groupby(["algorithm", "embedder"])
return grouped_data.mean().sort_values(by=["noise_percentage"], ascending=False)
def calculate_average_avg_cluster_size(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, finds average avg_cluster_size data for a given frequency"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
data = data.loc[data["frequency"] == frequency]
data = data.loc[data["avg_cluster_size"] != "None"]
data = data.loc[data["sil_score"] != "None"]
data["avg_cluster_size"] = data["avg_cluster_size"].astype("float32")
grouped_data = data[["algorithm", "embedder", "avg_cluster_size"]].groupby(["algorithm", "embedder"])
return grouped_data.mean().sort_values(by=["avg_cluster_size"], ascending=False)
def calculate_average_silhouette(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, finds average silhouette data for a given frequency"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
data = data.loc[data["frequency"] == frequency]
data["sil_score"] = data["sil_score"].apply(lambda x: "NaN" if str(x) == "None" else x).astype("float32")
grouped_data = data[["algorithm", "embedder", "sil_score"]].groupby(["algorithm", "embedder"])
return grouped_data.mean().sort_values(by=["sil_score"], ascending=False)
def calculate_average_davies_bouldin(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, finds average davies bouldin data for a given frequency"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
data = data.loc[data["frequency"] == frequency]
data["db_score"] = data["db_score"].apply(lambda x: "NaN" if str(x) == "None" else x).astype("float32")
grouped_data = data[["algorithm", "embedder", "db_score"]].groupby(["algorithm", "embedder"])
return grouped_data.mean().sort_values(by=["db_score"], ascending=False)
def calculate_average_calinski_harabasz(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, finds average davies bouldin data for a given frequency"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
data = data.loc[data["frequency"] == frequency]
data["ch_score"] = data["ch_score"].apply(lambda x: "NaN" if str(x) == "None" else x).astype("float32")
grouped_data = data[["algorithm", "embedder", "ch_score"]].groupby(["algorithm", "embedder"])
return grouped_data.mean().sort_values(by=["ch_score"], ascending=False)
def calculate_consolidated_data(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, calculates a consolidated dataframe"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
avg_silhouette_scores = calculate_average_silhouette(frequency, data.copy())
valid_entries = calculate_valid_entries(frequency, data.copy())
consolidated = pd.concat([avg_silhouette_scores, valid_entries], axis=1)
max_entries_value = np.max(consolidated['valid_entries'].tolist())
consolidated["weighted_score"] = (consolidated["sil_score"] * consolidated["valid_entries"]) / max_entries_value
return consolidated.sort_values(by=["weighted_score", "sil_score"], ascending=False)
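# Editorial note (hedged, numbers chosen only for illustration): the weighting above scales each
# average silhouette by how often the configuration produced a valid result. For example, a
# configuration with sil_score 0.5 and 40 valid entries, when the best configuration reached
# 50 valid entries, gets a weighted_score of 0.5 * 40 / 50 = 0.4.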
def consolidate_three_averages(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""Given raw result data, consolidates the average of the three measurements"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
avg_silhouette_scores = calculate_average_silhouette(frequency, data.copy())
avg_davies_bouldin = calculate_average_davies_bouldin(frequency, data.copy())
avg_calinski_harabasz = calculate_average_calinski_harabasz(frequency, data.copy())
consolidated = pd.concat(
[avg_silhouette_scores, avg_davies_bouldin, avg_calinski_harabasz],
axis=1
).round(3).reset_index().rename(
columns={
'embedder': 'Modelo',
'algorithm': 'Algoritmo',
'sil_score': 'Silueta',
'db_score': 'Davies-Bouldin',
'ch_score': 'Calinski-Harabasz',
}
)
short_names = get_embedder_function_short_names()
consolidated["Modelo"] = [
short_names[modelo]
for modelo in consolidated["Modelo"].tolist()
]
return consolidated.sort_values(by=["Modelo", "Algoritmo"])
def consolidate_three_weighted_averages(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""
Given raw result data, consolidates the weighted average of the three measurements
according to the valid entries they did
"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
avg_silhouette_scores = calculate_average_silhouette(frequency, data.copy())
avg_davies_bouldin = calculate_average_davies_bouldin(frequency, data.copy())
avg_calinski_harabasz = calculate_average_calinski_harabasz(frequency, data.copy())
valid_entries = calculate_valid_entries(frequency, data.copy())
consolidated = pd.concat(
[avg_silhouette_scores, avg_davies_bouldin, avg_calinski_harabasz, valid_entries],
axis=1
).reset_index()
max_entries_value = np.max(consolidated['valid_entries'].tolist())
consolidated["sil_score"] = (consolidated["sil_score"] * consolidated["valid_entries"]) / max_entries_value
consolidated["db_score"] = (consolidated["db_score"] * consolidated["valid_entries"]) / max_entries_value
consolidated["ch_score"] = (consolidated["ch_score"] * consolidated["valid_entries"]) / max_entries_value
consolidated = consolidated.rename(
columns={
'embedder': 'Modelo',
'algorithm': 'Algoritmo',
'sil_score': 'Silueta',
'db_score': 'Davies-Bouldin',
'ch_score': 'Calinski-Harabasz',
'valid_entries': 'Resultados válidos',
}
)
short_names = get_embedder_function_short_names()
consolidated["Modelo"] = [
short_names[modelo]
for modelo in consolidated["Modelo"].tolist()
]
return consolidated.round(3).sort_values(by=["Modelo", "Algoritmo"])
def consolidate_cluster_nature_values(frequency: str, data: pd.DataFrame) -> pd.DataFrame:
"""
Given raw result data, consolidates the weighted average of the three measurements
according to the valid entries they did
"""
assert frequency in VALID_FREQUENCIES, "Unknown frequency value"
avg_n_clusters = calculate_average_n_clusters(frequency, data.copy())
avg_noise_percentage = calculate_average_noise_percentage(frequency, data.copy())
avg_cluster_size = calculate_average_avg_cluster_size(frequency, data.copy())
consolidated = pd.concat(
[avg_n_clusters, avg_noise_percentage, avg_cluster_size],
axis=1
).reset_index()
consolidated = consolidated.rename(
columns={
'embedder': 'Modelo',
'algorithm': 'Algoritmo',
'n_clusters': 'Cantidad de clústers',
'noise_percentage': 'Ruido (%)',
'avg_cluster_size': 'Tamaño de clústers',
}
)
short_names = get_embedder_function_short_names()
consolidated["Modelo"] = [
short_names[modelo]
for modelo in consolidated["Modelo"].tolist()
]
consolidated = consolidated.round(3)
# workaround
for alg, mod in [
("Mean-Shift", "BERT: paraphrase",),
("Mean-Shift", "BERT: distiluse",),
]:
combination_does_not_exist = (
consolidated
.loc[consolidated["Modelo"] == mod]
.loc[consolidated["Algoritmo"] == alg]
.shape[0]) == 0
if combination_does_not_exist:
print(f"Filling {mod}-{alg}-{frequency} on third table")
consolidated = consolidated.append({
'Modelo': mod,
'Algoritmo': alg,
'Cantidad de clústers': float('nan'),
'Ruido (%)': float('nan'),
'Tamaño de clústers': float('nan'),
}, ignore_index=True)
return consolidated.sort_values(by=["Modelo", "Algoritmo"]).round(3)
def read_csv(file_path: Path) -> pd.DataFrame:
"""Given a path to a file, reads the file and returns a dataframe"""
return pd.read_csv(file_path)
def main() -> None:
"""Read input arguments and calculates and returns results"""
parser = argparse.ArgumentParser()
parser.add_argument("--frequency", metavar="FREQUENCY", type=str, default="daily",
choices=VALID_FREQUENCIES)
args = parser.parse_args()
print(f"Received frequency `{args.frequency}`")
file_path = Path(f"outputs/cluster_tags/{args.frequency}/results.csv")
output_file_path = Path(f"outputs/cluster_tags/{args.frequency}/aggregated_results.csv")
output_table_file_path = Path(f"outputs/cluster_tags/{args.frequency}/tables.txt")
assert file_path.exists(), f"The file {file_path} does not exist"
print("Reading data")
data = read_csv(file_path)
print("Eliminating DBSCAN from data")
data = data[data["algorithm"] != "DBSCAN"]
print("Eliminating 50 dimension models from data")
data = data[~data["embedder"].str.contains("50")]
frequency_name = "diaria" if args.frequency == 'daily' else 'semanal'
# Get average of silhouette score
consolidated_data = calculate_consolidated_data(args.frequency, data.copy())
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print("Avg. Silhouette Score & valid entries for each algorithm and embedding, over all entries")
print(consolidated_data)
consolidated_data.to_csv(output_file_path)
table_list: List[str] = []
# Get consolidated table with three measurements
consolidated_three_averages_data = consolidate_three_averages(args.frequency, data.copy())
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print("Avg metrics for each algorithm and embedding, over all entries")
print(consolidated_three_averages_data)
print(f"Printing TeX table for Three averages with frequency={args.frequency}")
table_three_averages = ludovico.generate_comparison_for_two_columns(
consolidated_three_averages_data,
"Modelo",
"Algoritmo",
["Silueta", "Davies-Bouldin"],
add_hlines=True,
data_highlight={
'Silueta': 'max',
'Davies-Bouldin': 'min',
},
table_width=1,
table_label=f"tabla_tres_metricas_{frequency_name}",
table_name=(
"Promedio de métricas de validación interna según configuración "
f"experimental con frecuencia {frequency_name}"
),
table_long_name=(
"Promedio de métricas de validación interna (coeficiente de silueta, "
"coeficiente de Davies-Bouldin y coeficiente de Calinski-Harabasz) según "
f"algoritmo y modelo utilizados con frecuencia {frequency_name}."
)
)
table_list.append(table_three_averages)
print(table_three_averages)
# Get consolidated table with three weighted measurements
consolidated_three_weighted_averages_data = consolidate_three_weighted_averages(args.frequency, data.copy())
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print("Weighted avg metrics for each algorithm and embedding, over all entries")
print(consolidated_three_weighted_averages_data)
print(f"Printing TeX table for Three weighted averages with frequency={args.frequency}")
table_three_weighted_averages = ludovico.generate_comparison_for_two_columns(
consolidated_three_weighted_averages_data,
"Modelo",
"Algoritmo",
["Silueta", "Davies-Bouldin", "Resultados válidos"],
add_hlines=True,
data_highlight={
'Silueta': 'max',
'Davies-Bouldin': 'min',
'Resultados válidos': 'max',
},
table_width=1,
table_label=f"tabla_tres_metricas_ponderadas_{frequency_name}",
table_name=(
"Promedio ponderado de métricas de validación interna por resultados válidos "
f"según configuración experimental con frecuencia {frequency_name}"
),
table_long_name=(
"Promedio ponderado de métricas de validación interna (coeficiente de silueta, "
"coeficiente de Davies-Bouldin y coeficiente de Calinski-Harabasz) por resultados válidos"
f" según algoritmo y modelo utilizados con frecuencia {frequency_name}."
)
)
table_list.append(table_three_weighted_averages)
print(table_three_weighted_averages)
# Get consolidated table with cluster nature measurements
consolidated_cluster_data = consolidate_cluster_nature_values(args.frequency, data.copy())
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print("Cluster nature metrics for each algorithm and embedding, over all entries")
print(consolidated_cluster_data)
print(f"Printing TeX table for Cluster nature metrics with frequency={args.frequency}")
table_cluster_data = ludovico.generate_comparison_for_two_columns(
consolidated_cluster_data,
"Modelo",
"Algoritmo",
["Cantidad de clústers", "Tamaño de clústers", "Ruido (%)",],
add_hlines=True,
table_width=1,
table_label=f"tabla_nat_clusters_{frequency_name}",
table_name=(
"Promedios de cantidad de clústers, tamaño de clústers, y porcentaje de ruido "
f"según configuración experimental con frecuencia {frequency_name}"
),
table_long_name=(
"Promedios de cantidad de clústers, tamaño de clústers, y porcentaje de ruido"
f" según algoritmo y modelo utilizados con frecuencia {frequency_name}."
)
)
table_list.append(table_cluster_data)
print(table_cluster_data)
# Table for specific case: april 28 2021
data_april_28 = data[data["tag"] == "2021-04-28"]
consolidated_three_averages_data_april_28 = consolidate_three_averages(args.frequency, data_april_28.copy())
with | pd.option_context('display.max_rows', None, 'display.max_columns', None) | pandas.option_context |
#!/usr/bin/env python
# coding: utf-8
# Create an image of confusion matrix from prediction results
#
# Usage:
# $ ./confusion_matrix.py <config.yml> <labels.txt> <output filename> <predict_results.txt>
# $ ./confusion_matrix.py /config/config.yml logs/$MODEL-PREFIX-labels.txt logs/confusion_matrix.png logs/predict_results.txt
#
# References
# http://hayataka2049.hatenablog.jp/entry/2016/12/15/222339
# http://qiita.com/hik0107/items/67ad4cfbc9e84032fc6b
# http://minus9d.hatenablog.com/entry/2015/07/16/231608
#
import sys
import yaml
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import unicodedata
from pandas_ml import ConfusionMatrix
from sklearn.metrics import confusion_matrix
def is_japanese(string):
for ch in string:
name = unicodedata.name(ch)
if "CJK UNIFIED" in name \
or "HIRAGANA" in name \
or "KATAKANA" in name:
return True
return False
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except NameError:
pass
config_file = sys.argv[1]
labels_file = sys.argv[2]
output_file = sys.argv[3]
result_file = sys.argv[4]
with open(config_file) as rf:
config = yaml.safe_load(rf)
with open(labels_file) as sf:
labels = [l.split(' ')[-1].strip() for l in sf.readlines()]
try:
cm_fontsize = config['test'].get('confusion_matrix_fontsize', 12)
cm_figsize = config['test'].get('confusion_matrix_figsize', 'auto')
if cm_figsize == 'auto':
num_class = len(labels)
if 0 < num_class <= 10:
cm_figsize = '8,6'
elif 10 < num_class <= 30:
cm_figsize = '12,9'
else:
cm_figsize = '16,12'
cm_figsize = tuple(float(i) for i in cm_figsize.split(','))
except AttributeError:
print('Error: Missing test and/or data section at config.yml')
sys.exit(1)
with open(result_file) as rf:
lines = rf.readlines()
model_prefix = lines[0][14:].strip()
model_epoch = int(lines[1][13:].split(',')[0].strip())
target_data = lines[2]
results = [(l.split(' ')[0], l.split(' ')[1], l.split(' ')[2]) for l in lines[3:]]
y_true = [labels[int(i[1])] for i in results]
y_pred = [labels[int(i[2])] for i in results]
if is_japanese(''.join(labels)):
matplotlib.rcParams['font.family'] = 'IPAexGothic'
sns.set(font=['IPAexGothic'])
else:
sns.set()
fig = plt.figure(figsize = cm_figsize)
plt.rcParams["font.size"] = cm_fontsize
cmx_data = confusion_matrix(y_true, y_pred, labels=labels)
df_cmx = | pd.DataFrame(cmx_data, index=labels, columns=labels) | pandas.DataFrame |
'''
Display the importance scores of the weights
'''
import os
import matplotlib.pyplot as plt
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Timeline, Bar, HeatMap, Line, Page
from pyecharts.faker import Faker
from pyecharts.globals import ThemeType
import numpy as np
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = os.path.join(PROJECT_PATH, '../data/fishBehavior')
plt.rc('font', family='Times New Roman')
fontsize = 12.5
ANGLE_NAME = ['Angle_0.0', 'Angle_20.0', 'Angle_40.0', 'Angle_60.0', 'Angle_80.0', 'Angle_100.0', 'Angle_120.0',
'Angle_140.0', 'Angle_160.0']
ACC_NAME = ['AccSpeed_0.0','AccSpeed_2.0','AccSpeed_4.0','AccSpeed_6.0','AccSpeed_8.0']
def format_data(data: pd.DataFrame, time_list: list, name_list: list) -> dict:
data = data.T.to_dict()
fdata = {}
for t_id, vdata in data.items():
fdata[time_list[t_id]] = [v for region, v in vdata.items()]
for min_t in time_list:
temp = fdata[min_t]
for i in range(len(temp)):
fdata[min_t][i] = {"name": name_list[i], "value": temp[i]}
return fdata
#####################################################################################
# Data for 2002 - 2011
def get_year_overlap_chart(total_data, time_mim: int) -> Bar:
bar = (
Bar()
.add_xaxis(xaxis_data=name_list)
)
bar.add_yaxis(
series_name="velocity",
y_axis=total_data["velocity"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack1'
)
bar.add_yaxis(
series_name="distance",
y_axis=total_data["distance"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack1'
)
bar.add_yaxis(
series_name="velocity",
y_axis=total_data["velocity"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack2'
)
bar.add_yaxis(
series_name="distance",
y_axis=total_data["distance"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack2'
)
# print(total_data["bottom_time"][time_mim])
# print(Faker.values())
# exit(33)
# bar.add_yaxis("moving time", [31, 58, 80, 26], stack="stack1", category_gap="50%")
# bar.add_yaxis("static time", [31, 58, 80, 26], stack="stack1", category_gap="50%")
bar.set_global_opts(
title_opts=opts.TitleOpts(
title="{}分钟后,斑马鱼运动指标".format(time_mim)
),
datazoom_opts=opts.DataZoomOpts(),
tooltip_opts=opts.TooltipOpts(
is_show=True, trigger="axis", axis_pointer_type="shadow"
),
)
return bar
def getLine(v_data, name):
l = (
Line()
.add_xaxis(xaxis_data=[str(_) for _ in time_list])
.add_yaxis(
series_name="1_1",
y_axis=v_data['1_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="2_CK",
y_axis=v_data['2_CK'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="3_1",
y_axis=v_data['3_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="4_1",
y_axis=v_data['4_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.set_series_opts(
areastyle_opts=opts.AreaStyleOpts(opacity=0.5),
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(
title_opts=opts.TitleOpts(title=name),
tooltip_opts=opts.TooltipOpts(trigger="axis"),
datazoom_opts=opts.DataZoomOpts(),
yaxis_opts=opts.AxisOpts(
type_="value",
axistick_opts=opts.AxisTickOpts(is_show=True),
splitline_opts=opts.SplitLineOpts(is_show=True),
),
xaxis_opts=opts.AxisOpts(type_="category", boundary_gap=False),
)
)
return l
def getStackBar(top_data, bottom_time, name1, name2, name):
def format(t):
region = {}
for i in name_list:
td = t[i].values
list1 = []
for v in td:
list1.append({
"value": v,
"percent": v,
})
region[i] = list1
return region
td = format(top_data)
bd = format(bottom_time)
c = (
Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
.add_xaxis(["Time " + str(_) + ":" + "/".join(name_list) for _ in time_list])
)
for idx, i in enumerate(name_list):
c.add_yaxis(name1, td[i], stack=f'stack{idx}')
c.add_yaxis(name2, bd[i], stack=f'stack{idx}')
c.set_series_opts(
label_opts=opts.LabelOpts(is_show=False)
)
c.set_global_opts(
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-15)),
datazoom_opts=opts.DataZoomOpts(),
title_opts=opts.TitleOpts(title=name)
)
return c
def getHeatMap(data, time_list, name):
def formatHeatmapData(rdata):
heat_data = []
rdata = np.around(rdata, decimals=3)
for t in range(rdata.shape[0]):
for a in range(rdata.shape[1]):
heat_data.append([t, a, rdata[t][a]])
return heat_data
c = (
HeatMap()
)
c.add_xaxis(time_list)
for region_name, v in data.items():
heat_data = formatHeatmapData(data[region_name].values)
if 'Acceleration' in name:
c.add_yaxis(
region_name,
ACC_NAME,
heat_data,
label_opts=opts.LabelOpts(is_show=True, position="inside"),
)
elif 'Angle' in name:
c.add_yaxis(
region_name,
ANGLE_NAME,
heat_data,
label_opts=opts.LabelOpts(is_show=True, position="inside"),
)
c.set_global_opts(
title_opts=opts.TitleOpts(title=name),
datazoom_opts=opts.DataZoomOpts(),
visualmap_opts=opts.VisualMapOpts(min_=0, max_=1),
)
return c
if __name__ == '__main__':
import argparse
import pandas as pd
ap = argparse.ArgumentParser()
ap.add_argument("-tid", "--t_ID", default="D01")
ap.add_argument("-lid", "--l_ID", default="D02")
ap.add_argument("-rid", "--r_ID", default="D04")
ap.add_argument("-iP", "--indicatorPath", default="E:\\data\\3D_pre/exp_pre/indicators/")
ap.add_argument("-o", "--outputPath", default="E:\\data\\3D_pre/exp_pre/results/")
args = vars(ap.parse_args())
outputPath = args["outputPath"]
if not os.path.exists(outputPath):
os.mkdir(outputPath)
files = os.listdir(args["indicatorPath"])
all_nos = []
for ifile in files:
no = ifile.split("_")[0]
start_no, end_no = no.split("-")
str_start_no = start_no.zfill(4)
str_end_no = end_no.zfill(4)
if (str_start_no, str_end_no) in all_nos:
continue
else:
all_nos.append((str_start_no, str_end_no))
all_nos.sort()
time_list = [_ for _ in range(0, int(all_nos[-1][1]))]
total_data = {}
name_list = [
"1_1",
"2_CK",
"3_1",
"4_1"
]
v_data = pd.DataFrame()
d_data = pd.DataFrame()
top_data = pd.DataFrame()
bottom_time = pd.DataFrame()
stop_time = pd.DataFrame()
moving_time = pd.DataFrame()
angle_data = {region_name: None for region_name in name_list}
acc_data = {region_name: None for region_name in name_list}
for ino in all_nos:
no_v_data = pd.DataFrame()
no_d_data = pd.DataFrame()
no_top_data = pd.DataFrame()
no_bottom_time = pd.DataFrame()
no_stop_time = pd.DataFrame()
no_moving_time = pd.DataFrame()
for RegionName in name_list:
indicator_file = os.path.join(args["indicatorPath"], str(int(ino[0]))+"-"+str(int(ino[1]))+ "_" + RegionName)
print(indicator_file)
data = pd.read_csv(indicator_file)
# velocity and distance
no_v_data = pd.concat([no_v_data, data[['velocity']]], axis=1)
no_d_data = pd.concat([no_d_data, data[['distance']]], axis=1)
no_top_data = pd.concat([no_top_data, data[['top_time']]], axis=1)
no_bottom_time = pd.concat([no_bottom_time, data[['bottom_time']]], axis=1)
no_stop_time = pd.concat([no_stop_time, data[['stop_time']]], axis=1)
v_data = pd.concat([v_data, no_v_data], axis=0)
d_data = pd.concat([d_data, no_d_data], axis=0)
top_data = | pd.concat([top_data, no_top_data], axis=0) | pandas.concat |
# ex01.py
#
# Functions and data useful in exercise 1 (bacterial GC content, etc.) of
# the BS32010 course at the University of Dundee
from Bio import SeqIO # For working with sequence data
from Bio.Graphics.ColorSpiral import get_color_dict # For defining colours
import matplotlib.pyplot as plt # For creating graphics
import pandas as pd # For working with dataframes
import os # For working with local files
bact_datadir = "genome_data/gc_content"
bact_files = {"Mycoplasma genitalium": ("NC_018495.fna",
"NC_018496.fna",
"NC_018497.fna",
"NC_018498.fna"),
"Mycoplasma pneumoniae": ("NC_000912.fna",
"NC_016807.fna",
"NC_017504.fna",
"NC_020076.fna"),
"Nostoc punctiforme": ("NC_010628.fna",),
"Escherichia coli": ("NC_000913.fna",
"NC_002695.fna",
"NC_004431.fna",
"NC_010468.fna"),
"Mycobacterium tuberculosis": ("NC_016934.fna",
"NC_017523.fna",
"NC_022350.fna",
"NC_000962.fna")}
bacteria = bact_files.keys()
unknown = pd.DataFrame([dict(species="Unknown", length=4391174,
GC=0.656209, color=(1, 0.2, 0.2)), ])
def calc_size_gc(*names):
""" When passed names corresponding to the bacteria
listed in bact_files, returns a Pandas dataframe
representing sequence length and GC content for
each chromosome.
"""
# Use a Pandas DataFrame to hold data. Dataframes are
# useful objects/concepts, and support a number of
# operations that we will exploit later.
df = | pd.DataFrame(columns=['species', 'length', 'GC', 'color']) | pandas.DataFrame |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats import mannwhitneyu
from EvaluationFunctions.LoadEvaluationWhereFileNames import load_evaluation_where_filenames
from EvaluationFunctions.LoadResultsSAW_WHERE import load_results_SAW_where
from Evaluation.Plot_DistributionDimensionWhere import plot_distribution_dimension_where
from Evaluation.Plot_pvalueOverTime import plot_pvalue_over_time
from Evaluation.Plot_DriftScorePerDimension import plot_drift_score_per_dimension
# 0. Read in file names of experiment
experiments = ['1Dim_Broken_NAE-IAW', '10Dim_Broken_NAE-IAW', '50Dim_Broken_NAE-IAW', '100Dim_Broken_NAE-IAW']
# '1Dim_Broken_RAE-IAW', '10Dim_Broken_RAE-IAW', '50Dim_Broken_RAE-IAW', '100Dim_Broken_RAE-IAW']
# experiments = ['50Dim_BigBatchTraining_Broken_NAE-IAW', '50Dim_LongAETraining_Broken_NAE-IAW', '50Dim_NewDesign_Broken_NAE-IAW']
accuracies = []
for experiment in experiments:
accuracies_experiment = {}
where_file_names, drift_dims = load_evaluation_where_filenames(experiment=experiment)
result_folder = "SAW_Autoencoder_ADWIN_Training"
file_names = ["FILE_NAME_re_all", "FILE_NAME_re_old_pattern"]
experiment_names = ["new pattern", "old pattern"]
# 1. Read in Files
for experiment_idx in range(len(file_names)):
# 1.1 Prepare both errors per dimension
# Load results
errors_per_dim, errors_per_dim_after_drift = load_results_SAW_where(
where_file_names=where_file_names, file_names=file_names, result_folder=result_folder)
# Take only values of the patterns before the detected drift point
instances_before_detected_drift = len(errors_per_dim) - len(errors_per_dim_after_drift)
errors_per_dim_before_drift = errors_per_dim[(instances_before_detected_drift - 200)
:- len(errors_per_dim_after_drift)]
# Take only values within 200 instances
errors_per_dim_after_drift = errors_per_dim_after_drift[:200]
# 1.2 Fill array and apply statistical test incrementally
# Iterate through instances
p_time_points = []
drift_scores_time_points = []
df_p_value = pd.DataFrame()
# Start with at least two instances for statistical test
for time_idx in range(1, errors_per_dim_after_drift.shape[0], 1):
# Incremental
incremental_errors_per_dim_after_drift = errors_per_dim_after_drift[:time_idx+1]
# Iterate through dimensions
p_dimensions = []
drift_scores_dimensions = []
for dim in range(errors_per_dim.shape[1]):
sample_error_per_dim_before_drift = errors_per_dim_before_drift[:, dim]
sample_error_per_dim_after_drift = incremental_errors_per_dim_after_drift[:, dim]
# Calculate Mann-Whitney U Test statistic
U1, p = mannwhitneyu(sample_error_per_dim_before_drift, sample_error_per_dim_after_drift)
# Calculate Drift Score
drift_score = abs(np.median(sample_error_per_dim_before_drift) - np.median(sample_error_per_dim_after_drift))
p_dimensions.append(p)
drift_scores_dimensions.append(drift_score)
p_time_points.append(p_dimensions)
drift_scores_time_points.append(drift_scores_dimensions)
# Create column indicating drift dimensions
values_drift_dim = ['Non-drift dimension'] * 100
values_drift_dim[drift_dims] = ['Drift dimension'] * len(range(*drift_dims.indices(100000)))
# Create column indication time
values_time = [time_idx+1] * 100
if (time_idx+1) % 10 == 0 or time_idx == 1:
# Create data frame
df_p_value_time_point = | pd.DataFrame(p_dimensions, columns=['p-value']) | pandas.DataFrame |
import sys
import pandas as pd
import numpy as np
from scipy import stats
from itertools import compress
import statsmodels.stats.multitest as smt
import scikits.bootstrap as bootstrap
from sklearn.decomposition import PCA
from .scaler import scaler
from .imputeData import imputeData
class statistics:
usage = """Generate a table of parametric or non-parametric statistics and merges them with the Peak Table (node table).
Initial_Parameters
----------
peaktable : Pandas dataframe containing peak data. Must contain 'Name' and 'Label'.
datatable : Pandas dataframe matrix containing values for statistical analysis
Methods
-------
set_params : Set parameters -
parametric: Perform parametric statistical analysis, assuming the data is normally distributed (default: True)
log_data: Perform a log ('natural', base 2 or base 10) on all data prior to statistical analysis (default: (False, 2))
scale_data: Scale the data ('standard' (centers to the mean and scales to unit variance), 'minmax' (scales between 0 and 1), 'maxabs' (scales to the absolute maximum value), 'robust' (centers to the median and scales to between 25th and 75th quantile range) (default: (True, 'standard'))
impute_data: Impute any missing values using KNN impute with a set number of nearest neighbours (default: (False, 3))
group_column_name: The group column name used in the datatable (default: None)
control_group_name: The control group name in the datatable, if available (default: None)
group_alpha_CI: The alpha value for group confidence intervals (default: 0.05)
fold_change_alpha_CI: The alpha value for mean/median fold change confidence intervals (default: 0.05)
pca_alpha_CI: The alpha value for the PCA confidence intervals (default: 0.05)
total_missing: Calculate the total missing values per feature (Default: False)
group_missing: Calculate the missing values per feature per group (if group_column_name not None) (Default: False)
pca_loadings: Calculate PC1 and PC2 loadings for each feature (Default: True)
normality_test: Determine normal distribution across whole dataset using Shapiro-Wilk test (pvalues < 0.05 ~ non-normal distribution) (default: True)
group_normality_test: Determine normal distribution across each group (if group_column_name not None) using Shapiro-Wilk test (pvalues < 0.05 ~ non-normal distribution) (default: True)
group_mean_CI: Determine the mean with bootstrapped CI across each group (if parametric = True and group_column_name not None) (default: True)
group_median_CI: Determine the median with bootstrapped CI across each group (if parametric = False and group_column_name not None) (default: True)
mean_fold_change: Calculate the mean fold change with bootstrapped confidence intervals (if parametric = True, group_column_name not None and control_group_name not None) (default: False)
median_fold_change: Calculate the median fold change with bootstrapped confidence intervals (if parametric = False, group_column_name not None and control_group_name not None) (default: False)
levene_twoGroup: Test null hypothesis that control group and each of the other groups come from populations with equal variances (if group_column_name not None and control_group_name not None) (default: False)
levene_allGroup: Test null hypothesis that all groups come from populations with equal variances (if group_column_name not None) (default: False)
oneway_Anova_test: Test null hypothesis that all groups have the same population mean, with included Benjamini-Hochberg FDR (if parametric = True and group_column_name not None) (default: False)
kruskal_wallis_test: Test null hypothesis that population median of all groups are equal, with included Benjamini-Hochberg FDR (if parametric = False and group_column_name not None) (default: False)
ttest_oneGroup: Calculate the T-test for the mean across all the data (one group), with included Benjamini-Hochberg FDR (if parametric = True, group_column_name is None or there is only 1 group in the data) (default: False)
ttest_twoGroup: Calculate the T-test for the mean of two groups, with one group being the control group, with included Benjamini-Hochberg FDR (if parametric = True, group_column_name not None and control_group_name not None) (default: False)
mann_whitney_u_test: Compute the Mann-Whitney U test to determine differences in distribution between two groups, with one being the control group, with included Benjamini-Hochberg FDR (if parametric = False, group_column_name not None and control_group_name not None) (default: False)
help : Print this help text
calculate : Performs the statistical calculations and outputs the Peak Table (node table) with the results appended.
"""
def __init__(self, peaktable, datatable):
peaktable = self.__checkPeakTable(self.__checkData(peaktable))
datatable = self.__checkData(datatable)
#Slice the meta-data, and select only peaks from the peaktable for processing, and add the meta-data back
meta = datatable.T[~datatable.T.index.isin(peaktable['Name'])].T.reset_index(drop=True)
dat = datatable[peaktable['Name']].reset_index()
datatable = pd.concat([meta, dat], axis=1).set_index(['index'])
datatable.index.name = None
self.__peaktable = peaktable
self.__datatable = datatable
self.set_params()
def help(self):
print(statistics.usage)
def set_params(self, parametric=True, log_data=(False,2), scale_data=(False, 'standard'), impute_data=(False, 3), group_column_name=None, control_group_name=None, group_alpha_CI=0.05, fold_change_alpha_CI=0.05, pca_alpha_CI=0.05, total_missing=False, group_missing=False, pca_loadings=True, normality_test=True, group_normality_test=True, group_mean_CI=True, group_median_CI=True, mean_fold_change=False, median_fold_change=False, kruskal_wallis_test=False, levene_twoGroup=False, levene_allGroup=False, oneway_Anova_test=False, ttest_oneGroup=False, ttest_twoGroup=False, mann_whitney_u_test=False):
parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test = self.__paramCheck(parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test)
self.__parametric = parametric;
self.__log_data = log_data;
self.__scale_data = scale_data;
self.__impute_data = impute_data;
self.__group_column_name = group_column_name;
self.__control_group_name = control_group_name;
self.__group_alpha_CI = group_alpha_CI;
self.__fold_change_alpha_CI = fold_change_alpha_CI;
self.__pca_alpha_CI = pca_alpha_CI;
self.__total_missing = total_missing;
self.__group_missing = group_missing;
self.__pca_loadings = pca_loadings;
self.__normality_test = normality_test;
self.__group_normality_test = group_normality_test;
self.__group_mean_CI = group_mean_CI;
self.__group_median_CI = group_median_CI;
self.__mean_fold_change = mean_fold_change;
self.__median_fold_change = median_fold_change;
self.__oneway_Anova_test = oneway_Anova_test;
self.__kruskal_wallis_test = kruskal_wallis_test;
self.__levene_twoGroup = levene_twoGroup;
self.__levene_allGroup = levene_allGroup;
self.__ttest_oneGroup = ttest_oneGroup;
self.__ttest_twoGroup = ttest_twoGroup;
self.__mann_whitney_u_test = mann_whitney_u_test;
def calculate(self):
peaktable = self.__peaktable
datatable = self.__datatable
parametric = self.__parametric
log_data = self.__log_data
scale_data = self.__scale_data
impute_data = self.__impute_data
group_column_name = self.__group_column_name
control_group_name = self.__control_group_name
group_alpha_CI = self.__group_alpha_CI
fold_change_alpha_CI = self.__fold_change_alpha_CI
pca_alpha_CI = self.__pca_alpha_CI
total_missing = self.__total_missing
group_missing = self.__group_missing
pca_loadings = self.__pca_loadings
normality_test = self.__normality_test
group_normality_test = self.__group_normality_test
group_mean_CI = self.__group_mean_CI
group_median_CI = self.__group_median_CI
mean_fold_change = self.__mean_fold_change
median_fold_change = self.__median_fold_change
kruskal_wallis_test = self.__kruskal_wallis_test
levene_twoGroup = self.__levene_twoGroup
levene_allGroup = self.__levene_allGroup
oneway_Anova_test = self.__oneway_Anova_test
ttest_oneGroup = self.__ttest_oneGroup
ttest_twoGroup = self.__ttest_twoGroup
mann_whitney_u_test = self.__mann_whitney_u_test
peakNames = list(peaktable['Name'].values)
meta = datatable.T[~datatable.T.index.isin(peakNames)].T.reset_index(drop=True)
peakData = datatable[peakNames].reset_index(drop=True)
(log_bool, log_base) = log_data;
if log_bool:
if isinstance(log_base, str) and log_base.lower() == 'natural':
peakData = peakData.applymap(np.log)
elif log_base == 2:
peakData = peakData.applymap(np.log2)
elif log_base == 10:
peakData = peakData.applymap(np.log10)
else:
print("Error: The chosen log type is invalid.")
sys.exit()
(scale_bool, scale_type) = scale_data
        if scale_bool:
            if isinstance(scale_type, str) and scale_type.lower() in ('standard', 'minmax', 'maxabs', 'robust'):
                peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
            else:
                print("Error: The chosen scale type is invalid.")
                sys.exit()
(impute_bool, k) = impute_data;
if impute_bool:
peakData = imputeData(peakData, k=k).reset_index(drop=True)
if not isinstance(peakData, pd.DataFrame):
peakData = pd.DataFrame(peakData, columns=list(peakNames)).reset_index(drop=True)
#Add the meta data back in with the logged, scaled, or imputed data
datatable = pd.concat([meta, peakData], axis=1).reset_index(drop=True)
statsData = pd.DataFrame()
if group_column_name is not None:
groups = np.unique(datatable[group_column_name].values)
groupData = []
# Append each group to a list
for group in groups:
groupData.append(datatable.loc[datatable[group_column_name] == group])
#Iterate over each peak/feature and calculate statistics
for peakName in peakNames:
statsDataDict = {}
groupDict = {}
df_totalGrpMissing = pd.DataFrame()
totalGrpMissingTitles = []
df_meanFold = pd.DataFrame()
df_medianFold = pd.DataFrame()
            df_mannWhitney = pd.DataFrame()
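
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how the statistics class documented above might be driven
# via set_params()/calculate(), assuming the full class definition (truncated
# in this excerpt) and its scaler/imputeData helpers are importable, and
# reusing the module's pandas import. The tiny synthetic tables and the
# 'Class'/'Control' group names are hypothetical placeholders, and the minimal
# peak table may need extra columns required by the internal checks.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_peaktable = pd.DataFrame({'Name': ['M1', 'M2', 'M3']})
    example_datatable = pd.DataFrame({
        'Class': ['Control', 'Control', 'Case', 'Case'],
        'M1': [1.0, 1.2, 2.1, 2.3],
        'M2': [0.5, 0.6, 0.4, 0.7],
        'M3': [3.3, 3.1, 5.0, 4.8],
    })
    stats = statistics(example_peaktable, example_datatable)
    stats.set_params(parametric=True,
                     log_data=(True, 2),
                     scale_data=(False, 'standard'),
                     impute_data=(False, 3),
                     group_column_name='Class',
                     control_group_name='Control',
                     mean_fold_change=True,
                     ttest_twoGroup=True)
    annotated_peaktable = stats.calculate()
    print(annotated_peaktable.head())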
from tsod.base import Detector
import pytest
import numpy as np
import pandas as pd
from tsod.custom_exceptions import WrongInputDataType
from tsod.detectors import (
RangeDetector,
DiffDetector,
CombinedDetector,
RollingStandardDeviationDetector,
ConstantValueDetector,
ConstantGradientDetector,
GradientDetector,
)
from tsod.features import create_dataset
from tsod.hampel import HampelDetector
from tsod.autoencoders import AutoEncoder
from tsod.autoencoder_lstm import AutoEncoderLSTM
from tests.data_generation import create_random_walk_with_outliers
@pytest.fixture
def data_series():
n_steps = 100
(
time_series_with_outliers,
outlier_indices,
random_walk,
) = create_random_walk_with_outliers(n_steps)
time = pd.date_range(start="2020", periods=n_steps, freq="1H")
return (
pd.Series(time_series_with_outliers, index=time),
outlier_indices,
pd.Series(random_walk, index=time),
)
@pytest.fixture
def range_data():
normal_data = np.array([0, np.nan, 1, 0, 2, np.nan, 3.14, 4])
abnormal_data = np.array([-1.0, np.nan, 2.0, np.nan, 1.0, 0.0, 4.1, 10.0])
expected_anomalies = np.array([True, False, False, False, False, False, True, True])
assert len(expected_anomalies) == len(abnormal_data)
return normal_data, abnormal_data, expected_anomalies
@pytest.fixture
def range_data_series(range_data):
normal_data, abnormal_data, expected_anomalies = range_data
time = pd.date_range(start="2020", periods=len(normal_data), freq="1H")
return (
pd.Series(normal_data, index=time),
pd.Series(abnormal_data, index=time),
expected_anomalies,
)
@pytest.fixture
def constant_gradient_data_series(range_data):
normal_data = np.array([0, np.nan, 1, 1.1, 1.4, 1.5555, 3.14, 4])
abnormal_data = np.array([-1, 2.0, 2.1, 2.2, 2.3, 2.4, 4, 10])
expected_anomalies = np.array([False, True, True, True, True, True, False, False])
time = pd.date_range(start="2020", periods=len(normal_data), freq="1H")
return (
pd.Series(normal_data, index=time),
pd.Series(abnormal_data, index=time),
expected_anomalies,
)
@pytest.fixture
def constant_data_series(range_data):
normal_data = np.array([0, np.nan, 1, 1.1, 1.4, 1.5555, 3.14, 4])
abnormal_data = np.array([-1, np.nan, 1, 1, 1, 1, 4, 10])
expected_anomalies = np.array([False, False, True, True, True, True, False, False])
time = pd.date_range(start="2020", periods=len(normal_data), freq="1H")
return (
pd.Series(normal_data, index=time),
pd.Series(abnormal_data, index=time),
expected_anomalies,
)
def test_base_detector_exceptions(range_data, range_data_series):
data, _, _ = range_data
data_series, _, _ = range_data_series
detector = RangeDetector()
pytest.raises(WrongInputDataType, detector.fit, data)
def test_range_detector(range_data_series):
data, _, _ = range_data_series
detector = RangeDetector(0, 2)
anomalies = detector.detect(data)
expected_anomalies = [False, False, False, False, False, False, True, True]
assert len(anomalies) == len(data)
assert sum(anomalies) == 2
assert all(expected_anomalies == anomalies)
def test_range_detector_autoset(range_data_series):
data, _, _ = range_data_series
anomalies = RangeDetector(min_value=3).detect(data)
assert sum(anomalies) == 4
anomalies = RangeDetector(max_value=3).detect(data)
assert sum(anomalies) == 2
def test_combined_fit(range_data_series):
normal_data, abnormal_data, labels = range_data_series
cd = CombinedDetector([ConstantValueDetector(), RangeDetector()])
cd.fit(normal_data)
anomalies = cd.detect(abnormal_data)
assert all(anomalies == labels)
def test_combined_wrong_type():
with pytest.raises(ValueError):
CombinedDetector([ConstantValueDetector, RangeDetector()]) #
def test_combined_access_items():
cd = CombinedDetector([ConstantValueDetector(), RangeDetector()])
assert isinstance(cd[0], Detector)
assert isinstance(cd[0], ConstantValueDetector)
assert isinstance(cd[1], RangeDetector)
assert isinstance(cd[-1], RangeDetector)
def test_range_detector_quantile():
np.random.seed(42)
train = np.random.normal(size=1000)
test = np.random.normal(size=1000)
train[42] = -6.5
train[560] = 10.5
test[142] = -4.5
test[960] = 5.5
normal_data_incl_two_outliers = pd.Series(train)
test_data = pd.Series(test)
# all test data is within range of train data, no anomalies detected
nqdetector = RangeDetector().fit(normal_data_incl_two_outliers)
detected_anomalies = nqdetector.detect(test_data)
assert sum(detected_anomalies) == 0
# exclude extreme values
detector = RangeDetector(quantiles=[0.001, 0.999]).fit(
normal_data_incl_two_outliers
)
detected_anomalies = detector.detect(test_data)
assert sum(detected_anomalies) == 2
assert detector._min > normal_data_incl_two_outliers.min()
assert detector._max < normal_data_incl_two_outliers.max()
def test_diff_detector_autoset(range_data_series):
normal_data, abnormal_data, expected_anomalies = range_data_series
detector = DiffDetector().fit(normal_data)
detected_anomalies = detector.detect(abnormal_data)
assert sum(detected_anomalies) == 2
def test_combined_detector():
df = pd.read_csv("tests/data/example.csv", parse_dates=True, index_col=0)
combined = CombinedDetector(
[
ConstantValueDetector(),
RangeDetector(max_value=2.0),
]
)
series = df.value
res = combined.detect(series)
assert isinstance(res, pd.Series)
def test_rollingstddev_detector():
np.random.seed(42)
normal_data = pd.Series(np.random.normal(scale=1.0, size=1000)) + 10.0 * np.sin(
np.linspace(0, 10, num=1000)
)
abnormal_data = pd.Series(np.random.normal(scale=2.0, size=100))
all_data = pd.concat([normal_data, abnormal_data])
detector = RollingStandardDeviationDetector()
anomalies = detector.detect(normal_data)
assert sum(anomalies) == 0
detector.fit(normal_data)
anomalies = detector.detect(normal_data)
assert sum(anomalies) == 0
anomalies = detector.detect(all_data)
assert sum(anomalies) > 0
# Manual specification
detector = RollingStandardDeviationDetector(max_std=2.0)
anomalies = detector.detect(normal_data)
assert sum(anomalies) == 0
anomalies = detector.detect(all_data)
assert sum(anomalies) > 0
def test_hampel_detector(data_series):
data_with_anomalies, expected_anomalies_indices, _ = data_series
detector = HampelDetector()
anomalies = detector.detect(data_with_anomalies)
anomalies_indices = np.array(np.where(anomalies)).flatten()
# Validate if the found anomalies are also in the expected anomaly set
# NB Not necessarily all of them
assert all(i in expected_anomalies_indices for i in anomalies_indices)
def test_autoencoder_detector(data_series):
data_with_anomalies, expected_anomalies_indices, normal_data = data_series
detector = AutoEncoder(
hidden_neurons=[1, 1, 1, 1], epochs=1
) # TODO add lagged features to increase layer size
detector.fit(normal_data)
anomalies = detector.detect(data_with_anomalies)
anomalies_indices = np.array(np.where(anomalies)).flatten()
# Validate if the found anomalies are also in the expected anomaly set
# NB Not necessarily all of them
# assert all(i in expected_anomalies_indices for i in anomalies_indices)
def test_autoencoderlstm_detector(data_series):
data_with_anomalies, expected_anomalies_indices, normal_data = data_series
detector = AutoEncoderLSTM()
detector.fit(data_with_anomalies)
anomalies = detector.detect(data_with_anomalies)
anomalies_indices = np.array(np.where(anomalies)).flatten()
def test_constant_value_detector(constant_data_series):
good_data, abnormal_data, _ = constant_data_series
detector = ConstantValueDetector(2, 0.0001)
anomalies = detector.detect(good_data)
assert len(anomalies) == len(good_data)
assert sum(anomalies) == 0
detector = ConstantValueDetector(3, 0.0001)
anomalies = detector.detect(abnormal_data)
assert len(anomalies) == len(abnormal_data)
assert sum(anomalies) == 4
def test_constant_gradient_detector(constant_gradient_data_series):
good_data, abnormal_data, _ = constant_gradient_data_series
detector = ConstantGradientDetector(3)
anomalies = detector.detect(good_data)
assert len(anomalies) == len(good_data)
assert sum(anomalies) == 0
detector = ConstantGradientDetector(3)
anomalies = detector.detect(abnormal_data)
assert len(anomalies) == len(abnormal_data)
assert sum(anomalies) == 5
def test_gradient_detector_constant_gradient(constant_gradient_data_series):
good_data, _, _ = constant_gradient_data_series
detector = GradientDetector(1.0)
anomalies = detector.detect(good_data)
assert len(anomalies) == len(good_data)
assert sum(anomalies) == 0
def test_gradient_detector_sudden_jump():
normal_data = np.array(
[
-0.5,
-0.6,
0.6,
0.6,
0.1,
0.6,
0.4,
0.8,
0.7,
1.5,
1.6,
1.1,
0.3,
2.1,
0.7,
0.3,
-1.7,
-0.3,
0.0,
-1.0,
]
)
abnormal_data = np.array(
[
-0.5,
-1.5,
1.5,
0.6,
0.1,
0.6,
0.4,
0.8,
0.7,
1.5,
1.6,
1.1,
0.3,
2.1,
0.7,
0.3,
-1.7,
-0.3,
0.0,
-1.0,
]
)
expected_anomalies = np.repeat(False, len(normal_data))
expected_anomalies[2] = True
time = pd.date_range(start="2020", periods=len(normal_data), freq="1H")
normal_data = pd.Series(normal_data, index=time)
    abnormal_data = pd.Series(abnormal_data, index=time)
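
# ---------------------------------------------------------------------------
# Illustrative demo (added for clarity; not part of the original test module).
# It recaps the fit/detect pattern exercised by the tests above, reusing the
# detector classes and pandas import from the top of this file. The synthetic
# series below are hypothetical examples, not data from the original suite.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    idx = pd.date_range(start="2020", periods=8, freq="1H")
    training = pd.Series([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], index=idx)
    suspect = pd.Series([0.0, 0.5, 99.0, 1.5, 2.0, -50.0, 3.0, 3.5], index=idx)

    # Explicit bounds, as in test_range_detector
    print(RangeDetector(0, 4).detect(suspect))

    # Bounds learned from "good" data, as in test_combined_fit
    combined = CombinedDetector([ConstantValueDetector(), RangeDetector()])
    combined.fit(training)
    print(combined.detect(suspect))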
import pytest
import collections
from pathlib import Path
import pandas as pd
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Constant, Annotator
import pypipegraph as ppg
from pypipegraph.testing import run_pipegraph, force_load
from pandas.testing import assert_frame_equal
from mbf_genomics.util import find_annos_from_column
class LenAnno(Annotator):
def __init__(self, name):
self.columns = [name]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
@pytest.mark.usefixtures("no_pipegraph")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameDirect:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_create_from_df(self):
test_df = pd.DataFrame({"A": [1, 2]})
a = DelayedDataFrame("shu", test_df)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write()[1]
assert "/sha" in str(fn.parent.absolute())
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), test_df)
def test_write_excel(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_excel2(self):
data = {}
for i in range(0, 257):
c = "A%i" % i
d = [1, 1]
data[c] = d
test_df = pd.DataFrame(data)
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_mangle(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert (a.non_annotator_columns == ["A", "B"]).all()
def mangle(df):
df = df.drop("A", axis=1)
df = df[df.B == "c"]
return df
fn = a.write("test.csv", mangle)[1]
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), mangle(test_df))
def test_magic(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
a = DelayedDataFrame("shu", lambda: test_df)
assert hash(a)
assert a.name in str(a)
assert a.name in repr(a)
def test_annotator(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("column", "value")
a.annotate()
assert "column" in a.df.columns
assert (a.df["column"] == "value").all()
def test_add_non_anno(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(TypeError):
a += 5
def test_annotator_wrong_columns(self):
class WrongConstant(Annotator):
def __init__(self, column_name, value):
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({"shu": self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(ValueError):
a += WrongConstant("column", "value")
def test_annotator_minimum_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
assert "Direct" in str(a.load_strategy)
class MissingCalc(Annotator):
column_names = ["shu"]
with pytest.raises(AttributeError):
a += MissingCalc()
class EmptyColumnNames(Annotator):
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNames()
class EmptyColumnNamesButCacheName(Annotator):
cache_name = "shu"
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNamesButCacheName()
class MissingColumnNames(Annotator):
def calc(self, df):
pass
with pytest.raises(AttributeError):
a += MissingColumnNames()
class NonListColumns(Annotator):
columns = "shu"
def calc(self, df):
pass
with pytest.raises(ValueError):
a += NonListColumns()
def test_DynamicColumNames(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class Dynamic(Annotator):
@property
def columns(self):
return ["a"]
def calc(self, df):
return pd.DataFrame({"a": ["x", "y"]})
a += Dynamic()
a.annotate()
assert_frame_equal(
a.df, pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "a": ["x", "y"]})
)
def test_annos_added_only_once(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
        a += c  # this gets ignored
def test_annos_same_column_different_anno(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
c = CountingConstant("hello2", "c")
a += c
a.annotate()
assert "hello2" in a.df.columns
assert count[0] == 2
d = CountingConstant("hello2", "d")
assert c is not d
with pytest.raises(ValueError):
a += d
def test_annos_same_column_different_anno2(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += A()
with pytest.raises(ValueError):
a += B()
def test_annos_dependening(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_annos_dependening_none(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [None, A(), None]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_filtering(self):
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "B"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("C", "c")
assert "C" in a.df.columns
b = a.filter("sha", lambda df: df["A"] == 1)
assert "C" in b.df.columns
a += A()
assert "aa" in a.df.columns
assert "aa" in b.df.columns
b += B()
assert "ab" in b.df.columns
assert not "ab" in a.df.columns
def test_filtering2(self):
counts = collections.Counter()
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
counts["A"] += 1
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "B"
columns = ["ab"]
def calc(self, df):
counts["B"] += 1
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["A"] == 1)
b += B()
assert "aa" in b.df.columns
assert "ab" in b.df.columns
assert not "aa" in a.df.columns
assert not "ab" in a.df.columns
assert counts["A"] == 1
a += A()
assert "aa" in a.df.columns
assert counts["A"] == 2 # no two recalcs
assert not "ab" in a.df.columns
a += B()
assert "ab" in a.df.columns
assert counts["A"] == 2 # no two recalcs
assert counts["B"] == 2 # no two recalcs
def test_filtering_result_dir(self):
counts = collections.Counter()
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
counts["A"] += 1
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
        a = DelayedDataFrame(
            "shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
        )
# Import Libraries
import time
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Import Libraries
from scipy import stats
import matplotlib.pyplot as plt
# import time
# Import Libraries
import math
class YinsDL:
print("---------------------------------------------------------------------")
print(
"""
Yin's Deep Learning Package
Copyright © W.Y.N. Associates, LLC, 2009 – Present
For more information, please go to https://wyn-associates.com/
""" )
print("---------------------------------------------------------------------")
# Define function
def NN3_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = NN3_Classifier(X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Model
model = tf.keras.Sequential([
keras.layers.Dense(units=layer1size, input_shape=[X_train.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train, y_train, epochs=num_of_epochs)
# Prediction
predictions = model.predict(X_test)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer3size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
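
    # -----------------------------------------------------------------------
    # Usage note (added for clarity; not part of the original class). Because
    # NN3_Classifier is defined inside the YinsDL class namespace without a
    # `self` parameter, it is called through the class, e.g.
    #
    #   result = YinsDL.NN3_Classifier(X_train, y_train, X_test, y_test,
    #                                  layer1size=128, layer2size=64,
    #                                  layer3size=2, num_of_epochs=50)
    #   result['Performance']['test_acc']
    #
    # mirroring the MANUAL example in the docstring above.
    # -----------------------------------------------------------------------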
# Define function
def NN10_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='relu', l4_act='relu', l5_act='relu',
l6_act='relu', l7_act='relu', l8_act='relu', l9_act='relu', l10_act='softmax',
layer1size=128, layer2size=64, layer3size=64, layer4size=64, layer5size=64,
layer6size=64, layer7size=64, layer8size=64, layer9size=64, layer10size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = NN10_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='relu', l4_act='relu', l5_act='relu',
l6_act='relu', l7_act='relu', l8_act='relu', l9_act='relu', l10_act='softmax',
layer1size=128, layer2size=64, layer3size=64, layer4size=64, layer5size=64,
layer6size=64, layer7size=64, layer8size=64, layer9size=64, layer10size=2,
plotROC=True,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Model
model = tf.keras.Sequential([
keras.layers.Dense(units=layer1size, input_shape=[X_train.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act),
keras.layers.Dense(units=layer4size, activation=l4_act),
keras.layers.Dense(units=layer5size, activation=l5_act),
keras.layers.Dense(units=layer6size, activation=l6_act),
keras.layers.Dense(units=layer7size, activation=l7_act),
keras.layers.Dense(units=layer8size, activation=l8_act),
keras.layers.Dense(units=layer9size, activation=l9_act),
keras.layers.Dense(units=layer10size, activation=l10_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train, y_train, epochs=num_of_epochs)
# Prediction
predictions = model.predict(X_test)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
        confusion = pd.DataFrame(confusion)
"""Trains and evaluates the random forest classifier for scratch segmentation"""
import os
import pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from feature_extraction import calc_features, addTruthLabel
# Training and their ground truths image path and files
imgpath = r"C:\Users\shubh\Documents\steel_detection\metal_nut\test\scratch"
gdpath = r"C:\Users\shubh\Documents\steel_detection\metal_nut\ground_truth\scratch"
imglist = ["007.png", "011.png", "014.png", "015.png", "016.png", "017.png"]
gdlist = ["007_mask.png", "011_mask.png", "014_mask.png", "015_mask.png", "016_mask.png", "017_mask.png"]
imgfiles = [os.path.join(imgpath,file) for file in imglist]
gdfiles = [os.path.join(gdpath,file) for file in gdlist]
# Adding all the features of different images in one dataframe
df = pd.DataFrame([])
for i in range(len(imgfiles)):
if df.empty:
df = calc_features(imgfiles[i])
df = addTruthLabel(df,gdfiles[i])
else:
dftemp = calc_features(imgfiles[i])
dftemp = addTruthLabel(dftemp,gdfiles[i])
        df = pd.concat([df,dftemp])
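
# ---------------------------------------------------------------------------
# Hypothetical continuation (added for illustration; not from the original
# script). The header says the script "trains and evaluates the random forest
# classifier", so this sketches that step using the imports declared above.
# The name of the ground-truth column produced by addTruthLabel() is not shown
# in this excerpt, so "label" below is an assumption -- adjust it to the real
# column name before use.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    label_column = "label"  # assumed name of the column added by addTruthLabel
    X = df.drop(columns=[label_column])
    y = df[label_column]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    clf = RandomForestClassifier(n_estimators=100, random_state=42)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
    # Persist the model for reuse, matching the pickle import above
    with open("rf_scratch_model.pkl", "wb") as fh:
        pickle.dump(clf, fh)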
"""Interface for running a registry of models on a registry of validations."""
from typing import Optional, Tuple
from kotsu.typing import Model, Results, Validation
import functools
import logging
import os
import time
import pandas as pd
from kotsu import store
from kotsu.registration import ModelRegistry, ModelSpec, ValidationRegistry, ValidationSpec
logger = logging.getLogger(__name__)
def run(
model_registry: ModelRegistry,
validation_registry: ValidationRegistry,
results_path: str = "./validation_results.csv",
skip_if_prior_result: bool = True,
artefacts_store_directory: Optional[str] = None,
run_params: dict = {},
):
"""Run a registry of models on a registry of validations.
Args:
model_registry: A ModelRegistry containing the registry of models to be run through
validations.
validation_registry: A ValidationRegistry containing the registry of validations to run
each model through.
results_path: The file path to which the results will be written to, and results from prior
runs will be read from.
skip_if_prior_result: Flag, if True then will not run validation-model combinations
that are found in the results at given results_path. If False then all combinations
will be ran and any prior results in results_path will be completely overwritten.
artefacts_store_directory: A directory path or URI location to store extra output
artefacts of the validations and models.
run_params: A dictionary of optional run parameters.
"""
results_df = pd.DataFrame(columns=["validation_id", "model_id", "runtime_secs"])
results_df["runtime_secs"] = results_df["runtime_secs"].astype(int)
if skip_if_prior_result:
try:
            results_df = pd.read_csv(results_path)
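
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module, and
# kept as comments because the excerpt above is truncated mid-function). It
# outlines how run() is typically driven: models and validations are
# registered, then run() writes/updates the results CSV. The package-level
# entry point and the register() signature shown here are assumptions based on
# the registry pattern and may differ from the installed kotsu version;
# MyModel and my_validation are hypothetical user objects.
#
#   import kotsu
#
#   model_registry = kotsu.registration.ModelRegistry()
#   model_registry.register(id="model_v1", entry_point=MyModel)  # signature assumed
#
#   validation_registry = kotsu.registration.ValidationRegistry()
#   validation_registry.register(id="validation_v1", entry_point=my_validation)  # signature assumed
#
#   kotsu.run(model_registry, validation_registry, results_path="./validation_results.csv")
# ---------------------------------------------------------------------------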
#!/usr/bin/env python3
import urllib.request
import subprocess
import os
import sys
from pathlib import Path
from datetime import datetime, timedelta
import pandas as pd
def contains_future_date(series):
try:
return (series > datetime.now()).any()
except TypeError as err:
        raise TypeError('series must have a datetime-compatible dtype')
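
# ---------------------------------------------------------------------------
# Usage note (added for illustration; not part of the original script):
# contains_future_date() flags any timestamp past "now", e.g.
#
#   dates = pd.to_datetime(pd.Series(['2020-01-01', '2999-12-31']))
#   contains_future_date(dates)                        # -> True (second date is in the future)
#   contains_future_date(pd.Series(['not a date']))    # -> raises TypeError
# ---------------------------------------------------------------------------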
def main():
# Download data
cases_data_url = 'https://data.nsw.gov.au/data/dataset/97ea2424-abaf-4f3e-a9f2-b5c883f42b6a/resource/2776dbb8-f807-4fb2-b1ed-184a6fc2c8aa/download/covid-19-cases-by-notification-date-location-and-likely-source-of-infection.csv'
tests_data_url = 'https://data.nsw.gov.au/data/dataset/60616720-3c60-4c52-b499-751f31e3b132/resource/fb95de01-ad82-4716-ab9a-e15cf2c78556/download/covid-19-tests-by-date-and-postcode-local-health-district-and-local-government-area-aggregated.csv'
urls = [cases_data_url, tests_data_url]
files = [url.split('/')[-1] for url in urls]
url_filename_pairs = zip(urls, files)
for url, filename in url_filename_pairs:
urllib.request.urlretrieve(url, filename)
# Test for invalid dates
date_columns = {'case': 'notification_date',
'test': 'test_date'}
names = date_columns.keys()
name_file_pairs = zip(names, files)
dfs = {}
for name, file in name_file_pairs:
df = pd.read_csv(file, dtype=str)
date_column = date_columns[name]
        dates = pd.to_datetime(df[date_column])
"""
Script to calculate Level of Travel Time Reliability (LOTTR) per FHWA
guidelines. Calculations for Auto/Bus traffic.
By <NAME>, Metro, <EMAIL>
NOTE: SCRIPT RELIES ON PANDAS v.0.23.0 OR GREATER!
"""
import os
import pandas as pd
import numpy as np
import datetime as dt
def calc_pct_reliability(df_pct):
"""
Calculates percent reliability of interstate and non-interstate network.
Args: df_pct, a pandas dataframe.
Returns: int_rel_pct, % interstate reliability.
non_int_rel_pct, % non interstate reliability.
"""
# Auto, Bus interstate
df_int = df_pct.loc[df_pct['interstate'] == 1]
df_int_sum = df_int['ttr'].sum()
df_int_rel = df_int.loc[df_int['reliable'] == 1]
int_rel_pct = df_int_rel['ttr'].sum() / df_int_sum
# Auto, Bus non-interstate
df_non_int = df_pct.loc[df_pct['interstate'] != 1]
df_non_int_sum = df_non_int['ttr'].sum()
df_non_int_rel = df_non_int.loc[df_non_int['reliable'] == 1]
non_int_rel_pct = df_non_int_rel['ttr'].sum() / df_non_int_sum
return int_rel_pct, non_int_rel_pct
def calc_ttr(df_ttr):
"""Calculate travel time reliability for auto and bus.
Args: df_ttr, a pandas dataframe.
Returns: df_ttr, a pandas dataframe with new columns:
VOLa, Yearly Auto volumes.
VOLb, Yearly Bus volumes.
ttr, Travel Time Reliability.
"""
# Working vehicle occupancy assumptions:
VOCa = 1.4
VOCb = 12.6
df_ttr['VOLa'] = df_ttr['pct_auto'] * df_ttr['dir_aadt'] * 365
df_ttr['VOLb'] = df_ttr['pct_bus'] * df_ttr['dir_aadt'] * 365
df_ttr['ttr'] = (df_ttr['miles'] * df_ttr['VOLa'] * VOCa
+ df_ttr['miles'] * df_ttr['VOLb'] *VOCb)
return df_ttr
def AADT_splits(df_spl):
"""Calculates AADT per vehicle type.
Args: df_spl, a pandas dataframe.
Returns: df_spl, a pandas dataframe containing new columns:
dir_aadt: directional aadt
aadt_auto: auto aadt
pct_auto, pct_bus, pct_truck : percentage mode splits of auto and bus.
"""
df_spl['dir_aadt'] = (df_spl['aadt']/df_spl['faciltype']).round()
df_spl['aadt_auto'] = df_spl['dir_aadt'] - \
(df_spl['aadt_singl'] + df_spl['aadt_combi'])
df_spl['pct_auto'] = df_spl['aadt_auto'] / df_spl['dir_aadt']
df_spl['pct_bus'] = df_spl['aadt_singl'] / df_spl['dir_aadt']
return df_spl
def check_reliable(df_rel):
"""Check reliability of TMCs across time periods.
Args: df_rel, a pandas dataframe.
Returns: df_rel, a pandas dataframe with new column:
reliable, with value 1 if all time periods are reliable.
"""
df_rel['reliable'] = np.where(
(df_rel['MF_6_9'] < 1.5)
& (df_rel['MF_10_15'] < 1.5)
& (df_rel['MF_16_19'] < 1.5)
& (df_rel['SATSUN_6_19'] < 1.5),
1, 0)
return df_rel
def calc_lottr(days, time_period, df_lottr):
"""Calculates LOTTR (Level of Travel Time Reliability) using FHWA metrics.
Args: df_lottr, a pandas dataframe.
Returns: df_lottr, a pandas dataframe with new columns:
             80_pct_tt, 80th percentile travel time.
             50_pct_tt, 50th percentile travel time.
             {days}_{time_period}, the completed LOTTR (80th / 50th percentile) calculation.
"""
df_lottr['80_pct_tt'] = df_lottr['travel_time_seconds']
df_lottr['50_pct_tt'] = df_lottr['travel_time_seconds']
tmc_operations = ({'80_pct_tt': lambda x: np.percentile(x, 80),
'50_pct_tt': lambda x: np.percentile(x, 50)})
df_lottr = df_lottr.groupby('tmc_code', as_index=False).agg(tmc_operations)
column_name = '{0}_{1}'.format(days, time_period)
df_lottr[column_name] = df_lottr['80_pct_tt'] / df_lottr['50_pct_tt']
#df_lottr = df_lottr.drop('travel_time_seconds', axis=1)
return df_lottr
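
# ---------------------------------------------------------------------------
# Illustrative example (added for clarity; not part of the original script).
# It runs calc_lottr() on a tiny synthetic travel-time table to show the
# 80th/50th percentile ratio it produces; the TMC code and values are made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _demo = pd.DataFrame({
        'tmc_code': ['114+04567'] * 5,
        'travel_time_seconds': [100.0, 105.0, 110.0, 120.0, 240.0],
    })
    # For this data the MF_6_9 ratio is about 1.31, i.e. below the 1.5 reliability threshold
    print(calc_lottr('MF', '6_9', _demo))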
def agg_travel_time_sat_sun(df_tt):
"""Aggregates weekend truck travel time reliability values.
Args: df_tt, a pandas dataframe.
    Returns: df_tmc, a pandas dataframe with one row per TMC and the
             SATSUN_6_19 LOTTR column merged in.
"""
# create 'clean' dataframe consisting of non-duplicated TMCs
tmc_list = df_tt['tmc_code'].drop_duplicates().values.tolist()
tmc_format = {'tmc_code': tmc_list}
df_tmc = pd.DataFrame.from_dict(tmc_format)
df_6_19 = df_tt[df_tt['measurement_tstamp'].dt.hour.isin(
[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])]
df = calc_lottr('SATSUN', '6_19', df_6_19)
df_tmc = pd.merge(df_tmc, df, on='tmc_code', how='left')
return df_tmc
def agg_travel_times_mf(df_tt):
"""Aggregates weekday truck travel time reliability values.
Args: df_tt, a pandas dataframe.
    Returns: df_tmc, a pandas dataframe with one row per TMC and the
             MF_6_9, MF_10_15 and MF_16_19 LOTTR columns merged in.
"""
# create 'clean' dataframe consisting of non-duplicated TMCs
tmc_list = df_tt['tmc_code'].drop_duplicates().values.tolist()
tmc_format = {'tmc_code': tmc_list}
df_tmc = pd.DataFrame.from_dict(tmc_format)
df_6_9 = df_tt[df_tt['measurement_tstamp'].dt.hour.isin([6, 7, 8, 9])]
df_10_15 = df_tt[df_tt['measurement_tstamp'].dt.hour.isin([10, 11, 12, 13,
14, 15])]
df_16_19 = df_tt[df_tt['measurement_tstamp'].dt.hour.isin([16, 17, 18, 19])]
data = {'6_9': df_6_9, '10_15': df_10_15, '16_19': df_16_19}
for key, value in data.items():
df = calc_lottr('MF', key, value)
        df_tmc = pd.merge(df_tmc, df, on='tmc_code', how='left')
import os
import pandas as pd
import numpy as np
from itertools import chain
from codifyComplexes.CodifyComplexException import CodifyComplexException
from .DataLoaderClass import DataLoader
class AbstractProtocol(DataLoader):
'''
This class is a template for codification protocols
'''
DESIRED_ORDER=["L", "R", "P"]
def __init__(self, dataRootPath, cMapPath, prevStepPaths, singleChainfeatsToInclude, pairfeatsToInclude=None, verbose=False):
'''
@param dataRootPath: str. A path to computedFeatures directory that contains needed features. Example:
computedFeatures/
common/
contactMaps/
seqStep/
conservation/
...
structStep/
PSAIA/
VORONOI/
...
@param cMapPath: str. A path to a dir that contains the contact map of the protein complex
@param prevStepPaths: str or str[]. A path to previous results files directory. If it is None, contactMaps files will be used
to define which residue pairs are in contact. Can also be a str[] if multiple feedback_path's
wanted
@param singleChainfeatsToInclude: List that contains the paths where single chain features needed for complex
codification are located. Must have the following format:
["featName":(relativePath_from_dataRootPath, listOfColumnNumbers, dictForNamedColums)]
dictForNamedColums= {"myFeatName":colNumber} or Empty
@param pairfeatsToInclude: List that contains the paths where pair features needed for complex
codification are located. Must have the following format:
["featName":(relativePath_from_dataRootPath, listOfColumnNumbers, dictForNamedColums)]
dictForNamedColums= {"myFeatName":colNumber} or {}
@param verbose: bool.
'''
DataLoader.__init__(self, dataRootPath, singleChainfeatsToInclude, pairfeatsToInclude, verbose)
self.dataRootPath= dataRootPath
self.cMapPath= cMapPath
self.verbose= verbose
if prevStepPaths is None:
self.prevFnamesList=None
else:
self.prevFnamesList= prevStepPaths if isinstance(prevStepPaths,list) else [prevStepPaths]
self.prevFnamesList= [os.path.join(onePath, fname) for onePath in self.prevFnamesList for fname in os.listdir(onePath)
if fname.endswith(".res.tab")]
if not self.useCorrMut:
self.pairfeatsToInclude = [ elem for elem in self.pairfeatsToInclude if elem[0]!="corrMut"]
if len(self.pairfeatsToInclude)==0:
self.pairfeatsToInclude = None
def applyProtocol( self, prefixComplex, prefixL, prefixR):
'''
This method is the basic skeleton for applyProtocol of subclasses
Given a prefix that identifies the complex and prefixes that identifies
the ligand and the receptor, this method integrates the information that
is contained in self.dataRootPath and is described in self.singleChainfeatsToInclude
@param prefixComplex: str. A prefix that identifies a complex
@param prefixL: str. A prefix that identifies the ligand of the complex
@param prefixR: str. A prefix that identifies the receptor of the complex
@return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (L to R).
Column names are:
'chainIdL', 'structResIdL', 'resNameL', 'chainIdR', 'structResIdR', 'resNameR', 'categ'
[propertiesP .... propertiesL .... propertiesR]
'''
if self.prevFnamesList is None:
cmapNameList= list(self.getFullNamesIterInPath( prefixComplex, self.cMapPath))
if len(cmapNameList)>1:
raise ValueError("There are more than 1 Contact map for %s in %s path"%(prefixComplex,self.cMapPath))
allPairsCodified= self.loadDataFile(cmapNameList)
else:
allPairsCodified= self.loadPreviousResults(prefixComplex)
if not self.pairfeatsToInclude is None:
allPairsCodified= self.addPairFeatures(prefixComplex, allPairsCodified)
lFeats= self.loadSingleChainFeatures( prefixL, "l")
rFeats= self.loadSingleChainFeatures( prefixR, "r")
# print(lFeats["chainIdL"].unique(), rFeats["chainIdR"].unique(), allPairsCodified["chainIdL"].unique(), allPairsCodified["chainIdR"].unique())
allPairsCodified= self.combinePairwiseAndSingleChainFeats(allPairsCodified,lFeats, rFeats)
assert allPairsCodified.shape[0]>1, "Error, %s dataset is empty"%prefixComplex
#Reorder columns
allPairsCodified= self.reorderColumns(allPairsCodified)
return allPairsCodified
def reorderColumns(self, allPairsCodified):
colNames= list(allPairsCodified.columns)
categIndex= colNames.index("categ")
lFeatNames= [elem for elem in colNames[(categIndex+1):] if elem[-1]=="L"]
rFeatNames= [elem for elem in colNames[(categIndex+1):] if elem[-1]=="R"]
pairwiseFeatNames= [elem for elem in colNames[(categIndex+1):] if elem.endswith("_P")]
colOrder= list(colNames[:(categIndex+1)])
for featType in AbstractProtocol.DESIRED_ORDER:
if featType=="L":
colOrder+=lFeatNames
elif featType=="R":
colOrder+=rFeatNames
elif featType=="P":
colOrder+=pairwiseFeatNames
else:
raise ValueError("just L, R or P allowed in idAbstractProtocol.DESIRED_ORDER")
allPairsCodified= allPairsCodified[ colOrder ]
allPairsCodified.columns= colOrder
return allPairsCodified
def loadPreviousResults(self, prefixComplex):
'''
Loads previous results. Returns a pandas.DataFrame that contains in each row
the scores of previous steps for a given pair of amino acids.
@param prefixComplex: str. A prefixOneChainType that identifies the receptor or ligand
@return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
one amino acid
Column names are:
'chainIdL', 'structResIdL', 'resNameL', 'chainIdR', 'structResIdR', 'resNameR',
'categ', [prev_step_scores]
'''
previousResultsList= [fname for fname in self.prevFnamesList if fname.endswith(prefixComplex+".res.tab")]
assert len(previousResultsList)>0, "No previous results"
for fname in previousResultsList:
if self.verbose: print("loading previous resuls for %s"%(prefixComplex))
prevResults= self.loadDataFile( iter([fname]) )
prevResults.loc[:,"prediction_norm"]= (prevResults["prediction"] - np.mean(prevResults["prediction"])) / np.std(prevResults["prediction"])
break # break to add just one type of previous predictions
if len(previousResultsList)>1:
for fname in previousResultsList[1:]: #Load the remaining previous predictions
if self.verbose: print("loading previous resuls for %s"%(prefixComplex))
prevNrow= prevResults.shape[0]
newData= self.loadDataFile( iter([fname] ))
newData.loc[:,"prediction_norm"]= (newData["prediction"] - np.mean(newData["prediction"])) / np.std(newData["prediction"])
prevResults= pd.merge(prevResults, newData, how='inner', on=["chainIdL", "structResIdL","resNameL",
"chainIdR", "structResIdR","resNameR","categ"])
curNrow= prevResults.shape[0]
if prevNrow<1:
raise CodifyComplexException(("Error merging previous results in %s. There are 0 rows "+
"in previous features")%(prefixComplex ))
elif (abs(float(prevNrow- curNrow)) / prevNrow) > 0.2:
print("%s Nrows previously/now %d/%d %s"%(prefixComplex, prevNrow, curNrow,fname))
raise CodifyComplexException(("Error merging previous results in %s. There are a different number of residues "+
"compared to previous file")%(prefixComplex))
return prevResults
def loadSingleChainFeatures(self, prefixOneChainType, chainType):
'''
Loads all features files computed for ligand or receptor chains. Returns a pandas.DataFrame
that contains in each row all features from all files for each amino acid. Just amino acids
that appears in each file will be included. Others will be ruled out (intersection)
@param prefixOneChainType: str. A prefixOneChainType that identifies the receptor or ligand
@param chainType: str. "l" for ligand and "r" for receptor
@return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
one amino acid
Column names are:
'chainId%s', 'structResId%s', 'resName%s', [properties]
%s is L if chainType=="l" and R if chainType=="r"
'''
assert chainType=="l" or chainType=="r"
chainType= chainType.upper()
oneChainTypeFeats= None
# self.verbose=True
for featNum in range(len(self.singleChainfeatsToInclude)): #Load just one single chain feature
featName, params= self.getParamsForLoadingFile( prefixOneChainType, featNum)
if self.verbose: print("loading %s for %s"%(featName,prefixOneChainType))
oneChainTypeFeats= self.loadDataFile(*params)
break # break to add just one type of feature. Next will be added in following loop
if len(self.singleChainfeatsToInclude)>1:
for featNum in range(len(self.singleChainfeatsToInclude))[1:]: #Load the remaining single chain feature
featName, params= self.getParamsForLoadingFile( prefixOneChainType, featNum)
if self.verbose: print("loading %s for %s"%(featName, prefixOneChainType))
prevNrow= oneChainTypeFeats.shape[0]
newData= self.loadDataFile(*params)
# print(newData.head())
# print(newData.shape, newData["chainId"].unique())
# print(prefixOneChainType, chainType, featName)
# raw_input("enter to continue")
# print(oneChainTypeFeats.head())
# print(oneChainTypeFeats.shape, oneChainTypeFeats["chainId"].unique())
# raw_input("enter to continue")
# print(list(oneChainTypeFeats.columns), list(newData.columns),oneChainTypeFeats.shape)
oneChainTypeFeats= pd.merge(oneChainTypeFeats, newData, how='inner', on=["chainId", "structResId","resName"])
# print(oneChainTypeFeats.columns, oneChainTypeFeats.shape)
# raw_input()
curNrow= oneChainTypeFeats.shape[0]
if prevNrow<1:
raise CodifyComplexException(("Error merging previous single chain feature %s in %s. There are 0 rows "+
"in previous feature to %s")%(featName, prefixOneChainType, featName))
elif (abs(float(prevNrow- curNrow)) / prevNrow) > 0.2:
print("Nrows previously/now", prevNrow, curNrow)
print("%s Nrows previously/now %d/%d %s"%(prefixOneChainType, prevNrow, curNrow,featName))
raise CodifyComplexException(("Error merging single chain feature %s in %s. There are a different number of residues "+
"in %s compared to previous features")%(featName, prefixOneChainType, featName))
oneChainTypeFeats.rename(columns={elem:elem+chainType for elem in list(oneChainTypeFeats.columns.values)}, inplace=True)
return oneChainTypeFeats
def addPairFeatures(self, prefix, allPairs):
'''
Loads all files computed for pairwise features and adds them to pairs residue df contained in allPairs.
Returns a pandas.DataFrame that contains in each row all pairwise features from all files for each pair of amino acids.
Just amino acid pairs that appears in each file will be included. Others will be ruled out (intersection)
@param prefix: str. A prefix that identifies the complex
@param allPairs: pandas.DataFrame. A pandas.Dataframe in which each row represents one amino acid pair
Column names are:
"chainIdL", "structResIdL","resNameL","chainIdR", "structResIdR","resNameR", "categ", [previousScores]
@return df: pandas.DataFrame. A pandas.Dataframe in which each row represents one amino acid pair
Column names are:
"chainIdL", "structResIdL","resNameL","chainIdR", "structResIdR","resNameR", "categ", [properties]
'''
pairTypeFeats= None
self.verbose=True
for featNum in range(len(self.pairfeatsToInclude)): #Load just one single chain feature
featName, params= self.getParamsForLoadingFile( prefix, featNum, lookForPairFeats=True)
if self.verbose: print("loading %s for %s"%(featName,prefix))
pairTypeFeats= self.loadDataFile(*params)
break # break to add just one type of feature. Next will be added in following loop
if len(self.pairfeatsToInclude)>1:
for featNum in range(len(self.pairfeatsToInclude))[1:]: #Load the remaining single chain feature
featName, params= self.getParamsForLoadingFile( prefix, featNum, lookForPairFeats=True)
if self.verbose: print("loading %s for %s"%(featName, prefix))
prevNrow= pairTypeFeats.shape[0]
newData= self.loadDataFile(*params)
pairTypeFeats= pd.merge(pairTypeFeats, newData, how='inner', on=["chainIdL", "structResIdL","resNameL",
"chainIdR", "structResIdR","resNameR"])
curNrow= pairTypeFeats.shape[0]
if prevNrow<1:
raise CodifyComplexException(("Error merging previous pair feature %s in %s. There are 0 rows "+
"in previous feature to %s")%(featName, prefix, featName))
elif (abs(float(prevNrow- curNrow)) / prevNrow) > 0.2:
print("Nrows previously/now", prevNrow, curNrow)
print("%s Nrows previously/now %d/%d %s"%(prefix, prevNrow, curNrow,featName))
raise CodifyComplexException(("Error merging pair feature %s in %s. There are a different number of residues "+
"in %s compared to previous features")%(featName, prefix, featName))
#check if _P has being assigned to allPairs pairwise features
nPs= sum( (1 for elem in pairTypeFeats.columns.values if elem.endswith("_P")))
if nPs>0: #if so add _P to pairwise features
pairTypeFeats.rename(columns={elem:elem+"_P" for elem in set(pairTypeFeats.columns.values
).difference(AbstractProtocol.ARE_STR_TYPE_COLUMNS)}, inplace=True)
# print(allPairs.shape)
prevNrow= allPairs.shape[0]
allPairs= pd.merge(allPairs, pairTypeFeats, how='inner', on=["chainIdL", "structResIdL","resNameL",
"chainIdR", "structResIdR","resNameR"])
# print(allPairs.head())
# print(allPairs.shape)
# raw_input("enter")
curNrow= allPairs.shape[0]
if abs(float(prevNrow- curNrow)) / prevNrow > 0.05:
print("Nrows previously/now", prevNrow, curNrow)
print("%s Nrows previously/now %d/%d %s"%(prefix, prevNrow, curNrow,featName))
raise CodifyComplexException(("Error merging pair feature %s in %s. There are a different number of residues "+
"in %s compared to previous features")%(featName, prefix, featName))
return allPairs
def combinePairwiseAndSingleChainFeats(self, pairFeats, singleFeatsL, singleFeatsR):
'''
Merges pairFeats pandas.DataFrame with singleFeatsL and singleFeatsR dataFrames.
singleFeatsL has n rows (as many as ligand residues) and singleFeatsR has m rows
(as many as ligand residues), and pairsFeats has ~ n*m rows
(some amino acids pairs might not be considered due to several reasons).
@param pairFeats: pandas.DataFrame. A pandas.Dataframe in which each row represents the properties of
a residue of the receptor
Column names are:
'chainIdL', 'structResIdL', 'resNameL', 'chainIdR', 'structResIdR', 'resNameR', 'categ'
[propertiesP]
@param singleFeatsL: pandas.DataFrame. A pandas.Dataframe in which each row represents the properties of
a residue of the ligand
Column names are:
'chainIdL', 'structResIdL', 'resNameL',[propertiesL]
@param singleFeatsR: pandas.DataFrame. A pandas.Dataframe in which each row represents the properties of
a residue of the receptor
Column names are:
'chainIdR', 'structResIdR', 'resNameR',[propertiesR]
@return directPairs: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (L to R).
Column names are:
'chainIdL', 'structResIdL', 'resNameL', 'chainIdR', 'structResIdR', 'resNameR', 'categ'
[properties_P .... propertiesL .... propertiesR]
'''
if singleFeatsL.shape[1]!= singleFeatsR.shape[1]:
print( "singleFeatsL and singleFeatsR have different number of variables")
featsLNo_chain= set( (elem[:-1] for elem in singleFeatsL.columns))
featsRNo_chain= set( (elem[:-1] for elem in singleFeatsR.columns))
print("L %d R %d"%(len(singleFeatsL.columns),len(singleFeatsR.columns)) ,"L dif R",
sorted(featsLNo_chain.difference(featsRNo_chain)), "R diff L",
sorted(featsRNo_chain.difference(featsLNo_chain)) )
raise ValueError( "singleFeatsL and singleFeatsR have different number of variables")
otherPairColumns= set(pairFeats.columns.values).difference(set(AbstractProtocol.ARE_STR_TYPE_COLUMNS+["categ"]))
if len(otherPairColumns)>0: #add _P suffix to all pairwise features
pairFeats.rename(columns={elem:elem+"_P" for elem in otherPairColumns}, inplace=True)
directPairs= pd.merge(pairFeats, singleFeatsL, how='inner', on=None)
directPairs= pd.merge(directPairs, singleFeatsR, how='inner', on=None)
return directPairs
def prepareDataForPairwiseAggregat(self, df):
'''
abstract method
'''
return None
def addPairwiseAggregation(self, df):
'''
Adds environment pairwise features to a df that contains pairwise features (named featName_P)
@param df: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (ligand, receptor)
Column names are:
'chainIdL', 'structResIdL', 'resNameL', 'chainIdR', 'structResIdR', 'resNameR', 'categ'
[properties_P .... propertiesL .... propertiesR] #no defined order for properties
@return newDf: pandas.DataFrame. A pandas.Dataframe in which each row represents
a pair of amino acids in direct form (L to R). New columns will have been added, all then
named %sAggr%d%s(numAggr/factorAggr, numberOfNewFeature, ligand/receptor)
Column names are:
'chainIdL', 'structResIdL', 'resNameL', 'chainIdR', 'structResIdR', 'resNameR', 'categ'
[ properties_P propertiesR propertiesL ] #no defined order for properties
'''
pairwiseDf, ids2RowL, ids2RowR, neigsids2rowL, neigsids2rowR = self.prepareDataForPairwiseAggregat(df)
nElems= df.shape[0]
l_2_r_neigs= [[] for i in range(df.shape[0])]
r_2_l_neigs= [[] for i in range(df.shape[0])]
l_neigs_2_r_neigs= [[] for i in range(df.shape[0])]
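        # Roughly speaking, for each pair row we collect three neighbourhoods of rows:
        # the ligand residue paired with neighbours of the receptor residue (l_2_r_neigs),
        # the receptor residue paired with neighbours of the ligand residue (r_2_l_neigs),
        # and neighbour-with-neighbour pairs (l_neigs_2_r_neigs).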
for idL in ids2RowL:
rowsInvolvingL= ids2RowL[idL]
rowsNeigsL= neigsids2rowL[idL]
for idR in ids2RowR:
rowsInvolvingR= ids2RowR[idR]
rowsNeigsR= neigsids2rowR[idR]
row_Index= tuple(rowsInvolvingL.intersection(rowsInvolvingR))[0]
l_2_r_neigs[row_Index]= tuple(rowsInvolvingL.intersection(rowsNeigsR))
r_2_l_neigs[row_Index]= tuple(rowsInvolvingR.intersection(rowsNeigsL))
l_neigs_2_r_neigs[row_Index]= tuple(rowsNeigsR.intersection(rowsNeigsL))
numericAggreL2r= computeNumericAggr(pairwiseDf, l_2_r_neigs)
numericAggreR2l= computeNumericAggr(pairwiseDf, r_2_l_neigs)
numericAggreN2N= computeNumericAggr(pairwiseDf, l_neigs_2_r_neigs)
numericAggreL2r.columns= [ "l2r-pair"+elem for elem in numericAggreL2r.columns]
numericAggreR2l.columns= [ "r2l-pair"+elem for elem in numericAggreR2l.columns]
numericAggreN2N.columns= [ "n2n-pair"+elem for elem in numericAggreN2N.columns]
df= pd.concat([df, numericAggreL2r, numericAggreR2l, numericAggreN2N ], axis=1)
return df
def computeNumericAggr(df, selectedRows):
'''
Compute aggregation functions (min, max, mean and sum) for the rows of data. Each row is aggregated
over the rows included in selectedRows
    @param df: pandas.DataFrame(nrow, nfeatures). The numeric data to aggregate. Each row i is aggregated
               (mean, max, min, sum) over the rows listed in selectedRows[i]
    @param selectedRows: [[]]: int (nrow, variableLength). Row indices to aggregate over for each row i
    @return pandas.DataFrame (nrow, 4*nfeatures)
'''
# s= time.time()
data= df.values
nVars= data.shape[1]
aggregatedResults= -(2**10)* np.ones( (data.shape[0], nVars* 4))
splitPoint2= 2*nVars
splitPoint3= 3*nVars
splitPoint4= 4*nVars
for i in range(data.shape[0]):
dataToAggregate= data[selectedRows[i], :]
if dataToAggregate.shape[0]>0:
aggregatedResults[i, 0:nVars]= np.mean( dataToAggregate, axis=0)
aggregatedResults[i, nVars:splitPoint2]= np.max( dataToAggregate, axis=0)
aggregatedResults[i, splitPoint2:splitPoint3]= np.min( dataToAggregate, axis=0)
aggregatedResults[i, splitPoint3:splitPoint4]= np.sum( dataToAggregate, axis=0)
    aggregatedResults = pd.DataFrame(aggregatedResults)
    return aggregatedResults
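# Minimal illustrative check (toy data, not part of the original module): each row of the
# frame is aggregated over a hand-picked list of neighbour rows; a row with an empty list
# keeps the -(2**10) placeholder fill values.
if __name__ == '__main__':
    _toy = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
    print(computeNumericAggr(_toy, [[1, 2], [0], []]))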
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 16:02:22 2018
@author: joyce
"""
import pandas as pd
import numpy as np
import pymysql
from sklearn import linear_model
import time
from functools import wraps
config = {
'host': 'magiquant.mysql.rds.aliyuncs.com',
'port': 3306,
'user':'haoamc',
'passwd':'<PASSWORD>',
'db': 'quant'
}
def timer(function):
@wraps(function)
def function_timer(*args, **kwargs):
t0 = time.time()
result = function(*args, **kwargs)
t1 = time.time()
print ("Total time running %s: %s seconds" %(function.__name__, str(round((t1-t0), 2))))
return result
return function_timer
@timer
def get_stockdata_from_sql(mode,begin,end,name):
"""
get stock market data from sql,include: [Open,High,Low,Close,Pctchg,Vol,
Amount,total_shares,free_float_shares,Vwap]
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
if mode == 0:
query = "SELECT stock_id,%s FROM stock_market_data WHERE trade_date='%s';"%(name,begin)
else:
query = "SELECT trade_date,stock_id,%s FROM stock_market_data WHERE trade_date >='%s' \
AND trade_date <= '%s';"%(name,begin,end)
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
if mode == 0:
date.columns =['ID','name']
else:
date.columns =['date','ID','name']
date = date.set_index('ID')
date.columns = ['date',name]
date = date.set_index([date['date'],date.index],drop = True)
del date['date']
return date
finally:
if conn:
conn.close()
@timer
def get_indexdata_from_sql(mode,begin,end,name,index):
"""
get stock market data from sql,include: [open,high,low,close,pctchg]
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
if mode == 0:
query = "SELECT stock_id,%s FROM index_market_data WHERE trade_date='%s' AND stock_id ='%s';"%(name,begin,index)
else:
query = "SELECT trade_date,stock_id,%s FROM index_market_data WHERE trade_date >='%s' \
AND trade_date <= '%s' AND stock_id ='%s';"%(name,begin,end,index)
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
if mode == 0:
date.columns =['ID','name']
else:
date.columns =['date','ID','name']
date = date.set_index('ID')
date.columns = ['date',name]
date = date.set_index([date['date'],date.index],drop = True)
del date['date']
return date
finally:
if conn:
conn.close()
@timer
def get_tradedate(begin, end):
"""
get tradedate between begin date and end date
Params:
begin:
str,eg: '1999-01-01'
end:
str,eg: '2017-12-31'
Return:
pd.DataFrame
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_trade_day= 1 AND \
calendar_date>='" + begin + "' AND calendar_date<='" + end + "';"
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
return date
finally:
if conn:
conn.close()
def get_fama(begin,end,name,index):
"""
get fama factor from sql
Params:
begin:
str,eg:"1990-01-01"
end:
str:eg:"2017-12-31"
index:
str, index id ,eg :'000300.SH'
name:
the name of fama factors ['SMB','HML','MKT']
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT trade_date,%s FROM fama_factor WHERE \
stock_id = '%s' AND trade_date >= '%s' AND trade_date <= '%s';"\
%(name,index,begin,end)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
data.columns = ['date',name]
return data
finally:
if conn:
conn.close()
@timer
def Corr(df,num):
"""
Params:
data:
pd.DataFrame,multi-index = ['date','id']
num:
int
Return:
pd.DataFrame,multi-index = ['date','id']
"""
df.columns = ['r1','r2']
df1 = df['r1']
df2 = df['r2']
df1_unstack = df1.unstack()
df2_unstack = df2.unstack()
corr = df1_unstack.rolling(num).corr(df2_unstack)
corr = corr.stack()
corr = pd.DataFrame(corr)
corr.columns = ['corr']
return corr
@timer
def Cov(df,num):
"""
Params:
data:
pd.DataFrame,multi-index = ['date','id']
num:
int
Return:
pd.DataFrame,multi-index = ['date','id']
"""
df.columns = ['r1','r2']
df1 = df['r1']
df2 = df['r2']
df1_unstack = df1.unstack()
df2_unstack = df2.unstack()
corr = df1_unstack.rolling(num).cov(df2_unstack)
corr = corr.stack()
corr = pd.DataFrame(corr)
corr.columns = ['cov']
return corr
@timer
def Delta(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
num:
int
Return:
        pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.shift(num)
df_temp1 = df_unstack - df_temp
df_final = df_temp1.stack()
return df_final
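# Illustrative note (not part of the original file): Delta(df, 5) returns, for every
# stock and date, alpha_t - alpha_{t-5}, i.e. the change versus five trading days
# earlier; the earliest five dates per stock have no defined value.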
@timer
def Delay(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
num:
int
Return:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.shift(num)
df_final = df_temp.stack()
return df_final
@timer
def Rank(df):
"""
Params:
df: pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
Return:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
"""
df = df.swaplevel(0,1)
df_mod = df.unstack()
df_rank = df_mod.rank(axis = 1)
df_final_temp = df_rank.stack()
    # swap the inner and outer index levels back
df_final = df_final_temp.swaplevel(0,1)
return df_final
@timer
def Cross_max(df1,df2):
"""
Params:
df1:
pd.DataFrame,multi-index = ['date','ID']
df2:
pd.DataFrame,multi-index = ['date','ID']
"""
df = pd.concat([df1,df2],axis =1 ,join = 'inner')
df_max = np.max(df,axis = 1)
return df_max
@timer
def Cross_min(df1,df2):
"""
Params:
df1:
pd.DataFrame,multi-index = ['date','ID']
df2:
pd.DataFrame,multi-index = ['date','ID']
"""
df = pd.concat([df1,df2],axis =1 ,join = 'inner')
df_min = np.min(df,axis = 1)
return df_min
@timer
def Sum(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack(level = 'ID')
df_temp = df_unstack.rolling(num).sum()
df_final = df_temp.stack()
return df_final
@timer
def Mean(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).mean()
df_final = df_temp.stack()
return df_final
@timer
def STD(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).std()
df_final = df_temp.stack()
return df_final
@timer
def TsRank(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df = df.swaplevel(0,1)
df_unstack = df.unstack()
date = df_unstack.index.tolist()
ts_rank = pd.DataFrame([])
for i in range(num,len(date)):
df = df_unstack.iloc[i-num:i,:]
df_rank = df.rank(axis = 0)
ts_rank_temp = pd.DataFrame(df_rank.iloc[num-1,:]).T
ts_rank = pd.concat([ts_rank,ts_rank_temp],axis = 0)
ts_rank = ts_rank.stack()
ts_rank = ts_rank.swaplevel(0,1)
return ts_rank
@timer
def TsMax(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).max()
df_final = df_temp.stack()
return df_final
@timer
def TsMin(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).min()
df_final = df_temp.stack()
return df_final
@timer
def DecayLinear(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df2 = df.swaplevel(0,1)
df_unstack2 = df2.unstack()
secID = df_unstack2.index.tolist()
array = np.arange(num,0,-1)
w = pd.DataFrame(array/array.sum())
date = df_unstack.index.tolist()
df_wma = pd.DataFrame([])
for i in range(num,len(date)):
df_temp = df_unstack.iloc[i-num:i,:]
df_temp1 = np.multiply(df_temp,w)
df_wma_temp = pd.DataFrame(df_temp1.sum(axis = 0))
df_wma_temp['date'] = date[i]
df_wma_temp['secID'] = secID
df_wma_tep = df_wma_temp.set_index([df_wma_temp['date'],df_wma_temp['secID']],drop = True)
del df_wma_tep['date'],df_wma_tep['secID']
df_wma = pd.concat([df_wma,df_wma_tep],axis = 0)
return pd.DataFrame(df_wma)
@timer
def SUMIF(mode,condi,num,temp1,temp2):
"""
Params:
mode = 0 or 1, if mode = 0, represent >; 1: >=
temp1 = pd.DataFrame,multi-index = ['date','ID']
temp2 = pd.DataFrame,multi-index = ['date','ID']
num = int
Return:
pd.DataFrame
"""
data = pd.concat([condi,temp1,temp2], axis = 1, join = 'inner')
data.columns = ['condi','temp1','temp2']
if mode == 0:
data['condi'][data['temp1'] <= data['temp2']] = 0
else:
data['condi'][data['temp1'] > data['temp2']] =0
result_unstack = data['condi'].unstack()
result = result_unstack.rolling(num,min_periods = num).sum()
result_stack = result.stack()
return pd.DataFrame(result_stack)
@timer
def Count(mode,temp1,temp2,num):
"""
Params:
mode = 0 or 1, if mode = 0, represent >; 1: >=
temp1 = pd.DataFrame,multi-index = ['date','ID']
temp2 = pd.DataFrame,multi-index = ['date','ID']
num = int
Return:
pd.DataFrame
"""
data = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data.columns = ['c1','c2']
data['result'] = 0
if mode == 0:
data['result'][data['c1'] > data['c2']] = 1
else:
data['result'][data['c1'] >= data['c2']] = 1
result_unstack = data['result'].unstack()
result = result_unstack.rolling(num,min_periods = num).sum()
result_stack = result.stack()
return pd.DataFrame(result_stack)
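# Illustrative note (not in the original file): Count(0, close, open_, 20) counts, per
# stock, how many of the last 20 trading days had close strictly above open; the names
# close/open_ here are hypothetical inputs sharing the same ['date','ID'] multi-index.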
@timer
def SMA(df,num1,num2):
df_unstack = df.unstack()
if num2 == 1:
sma_temp = df_unstack.ewm(com = num1,ignore_na = True, min_periods = num1).mean()
else:
sma_temp = df_unstack.ewm(span = num1,ignore_na = True, min_periods = num1).mean()
sma = sma_temp.stack()
return sma
@timer
def DTM(df_open,df_high):
df_open_delay = Delay(df_open,1)
data = pd.concat([df_open,df_high,df_open_delay],axis = 1, join = 'inner')
data.columns = ['open','high','open_delay']
diff = pd.DataFrame(data['high'] - data['open'])
open_delta= pd.DataFrame(data['open'] - data['open_delay'])
data1 = pd.concat([diff,open_delta],axis = 1, join = 'inner')
temp = pd.DataFrame(np.max(data1,axis = 1))
temp.columns = ['temp']
data_temp = pd.concat([data,temp],axis = 1, join = 'inner')
data_temp['alpha'] = data_temp['temp']
data_temp['alpha'][data_temp['open'] <= data_temp['open_delay']] = 0
    return pd.DataFrame(data_temp['alpha'])
# Now we'll learn how to merge data sets by linking rows by keys.
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
# Let's make a dframe
dframe1 = DataFrame(
{'key': ['X', 'Z', 'Y', 'Z', 'X', 'X'], 'data_set_1': np.arange(6)})
# Now lets make another dframe
dframe2 = DataFrame({'key': ['Q', 'Y', 'Z'], 'data_set_2': [1, 2, 3]})
# Now we can merge the dataframes; this is a "many-to-one" situation
# Merge will automatically choose overlapping columns to merge on
pd.merge(dframe1, dframe2)
# Note no overlapping 'X's
# We could have also specified which column to merge on
pd.merge(dframe1, dframe2, on='key')
# We can choose which DataFrame's keys to use, this will choose left (dframe1)
pd.merge(dframe1, dframe2, on='key', how='left')
# Choosing the one on the right (dframe2)
pd.merge(dframe1, dframe2, on='key', how='right')
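# As a further illustration (not part of the original walkthrough), an "outer" merge keeps
# the union of keys from both frames, filling the non-matching side with NaN
pd.merge(dframe1, dframe2, on='key', how='outer')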
# Data Prep
# extract table for each team
# reduce to player, age, pos, gp, min, mpg, usage #
# identify team name
# Data Calculation
# average team age
# weighted average team age by total minutes
# weighted average team age by USG% (statistical leader awards use a 58 / 82 (70.7%) threshold to be considered)
# weighted average team age by total minutes ... by position
# Data Export
# export new data (by date) to local CSV file
from bs4 import BeautifulSoup
import math
import numpy as np
import os
import pandas as pd
import re
from urllib.request import urlopen
## Functions ##
# Data Prep #
def ctg_extract(url, columns, remove_rank):
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
table = soup.find("table")
df = pd.read_html(str(table))[0]
df = df.drop(remove_rank, axis=1, level=1)
df.columns = df.columns.droplevel(-1)
df = df[columns]
return df
def ctg_team_name(url):
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
html_team_name = str(soup.findAll("span", {"class": "hidden-mobile"}))
team_name = re.search(">(.*)<", html_team_name)
team_name = team_name.group(1)
return team_name
# Data Calcs #
def avg_age_by_mpg(df, min_var, age_var):
df = df.copy()
df['minute_weight'] = df[min_var] / df[min_var].sum()
df['age_weight'] = df['minute_weight'] * df[age_var]
avg_age_by_mpg = round(df['age_weight'].sum(),2)
return avg_age_by_mpg
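# Minimal illustrative check (hypothetical three-player frame, not from the original script):
# a 30-year-old playing half the team minutes and two 20-year-olds splitting the rest gives
# 0.5*30 + 0.25*20 + 0.25*20 = 25.0
if __name__ == '__main__':
    _demo = pd.DataFrame({'MIN': [2000, 1000, 1000], 'Age': [30, 20, 20]})
    print(avg_age_by_mpg(_demo, 'MIN', 'Age'))  # expected 25.0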
### note: using threshold of 58 / 82 (70.7%) of GP
### note: gp calculated as sum of minutes and raised to ceiling integer (as CTG tosses out garbage time, risks an 81 GP total)
def avg_age_by_usg(df, min_var, age_var, usg_var, gp_var):
total_gp_threshold = math.ceil(df[min_var].sum() / 240) * (58/82)
df = df.loc[df[gp_var] >= total_gp_threshold].copy()
df[usg_var] = df[usg_var].str.rstrip('%').astype('float') / 100.0
df['usage_weight'] = df[usg_var] / df[usg_var].sum()
df['age_weight'] = df['usage_weight'] * df[age_var]
avg_age_by_usg = round(df['age_weight'].sum(),2)
return avg_age_by_usg
def avg_age_by_position_by_mpg(df, min_var, age_var, pos_var):
pos_list = list(df[pos_var].unique())
    return_df = pd.DataFrame()
import json
import os
import urllib
import pandas as pd
from bokeh.models import ColumnDataSource, HoverTool, Legend, LegendItem
from bokeh.sampledata import us_states
from bokeh.tile_providers import Vendors, get_provider
from postreise.plot.canvas import create_map_canvas
from postreise.plot.check import _check_func_kwargs
from postreise.plot.projection_helpers import project_borders
def download_states_shapefile():
"""Downloads shapefile for U.S. states.
:return: (*str*) -- path to shapefile.
"""
shapes_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "shapes")
os.makedirs(shapes_path, exist_ok=True)
url_base = "https://besciences.blob.core.windows.net/us-shapefiles/"
shape_filename = "cb_2018_us_state_20m"
shape_entensions = ["cpg", "dbf", "prj", "shp", "shx"]
for ext in shape_entensions:
filepath = os.path.join(shapes_path, f"{shape_filename}.{ext}")
if not os.path.isfile(filepath):
r = urllib.request.urlopen(f"{url_base}{shape_filename}.{ext}")
with open(filepath, "wb") as f:
f.write(r.read())
filename = os.path.join(shapes_path, f"{shape_filename}.shp")
return filename
def download_states_json():
"""Downloads json file containing coordinates for U.S. state outlines.
:return: (*str*) -- path to json file.
"""
shapes_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "shapes")
os.makedirs(shapes_path, exist_ok=True)
json_filename = "state_shapes.json"
filepath = os.path.join(shapes_path, json_filename)
if not os.path.isfile(filepath):
url_base = "https://besciences.blob.core.windows.net/us-shapefiles/"
r = urllib.request.urlopen(f"{url_base}{json_filename}")
with open(filepath, "wb") as f:
f.write(r.read())
return filepath
def get_state_borders():
"""Get state borders as a dictionary of coordinate arrays.
:return: (*dict*) -- dictionary with keys from the specified shapefile column,
values are dict with keys of {"lat", "lon"}, values are coordinates, padded by
nan values to indicate the end of each polygon before the start of the next one.
"""
try:
json_filepath = download_states_json()
with open(json_filepath, "r") as f:
us_states_dat = json.load(f)
except Exception:
# In case we can't get the json file, use the bokeh shapes
us_states_dat = us_states.data
return us_states_dat
def expand_data_source(patches, state2data, key_name):
"""Add data to a bokeh patch object
:param bokeh.models.renderers.GlyphRenderer patches: states as glyphs.
:param dict state2data: keys are states abbreviation and values are data.
:param str key_name: name to use for the key in data source.
:return: (*bokeh.models.renderers.GlyphRenderer*) -- updated patches.
:raises TypeError:
if ``state2data`` is not a dict.
if ``key_name`` is not a str.
:raises ValueError:
if states in ``state2data`` and ``patches`` differ.
"""
if not isinstance(state2data, dict):
raise TypeError("state2data must be a dict")
if not isinstance(key_name, str):
raise TypeError("key_name must be a str")
if len(set(patches.data_source.data["state_name"]) - set(state2data.keys())) != 0:
raise ValueError("states in patches are missing from state2data")
patches.data_source.data[key_name] = [
state2data[s] for s in patches.data_source.data["state_name"]
]
return patches
def add_state_borders(
canvas,
state_list=None,
background_map=False,
line_color="grey",
line_width=1,
fill_alpha=1,
):
"""Add state borders onto canvas.
:param bokeh.plotting.figure canvas: canvas.
:param list state_list: list of states to display, default to continental US.
:param bool background_map: add background behind state borders.
:param str line_color: color of state outlines.
:param int/float line_width: thickness of state outlines.
:param int/float fill_alpha: opaqueness for state patches.
:return: (*bokeh.plotting.figure.Figure*) -- canvas with state borders.
:raises TypeError:
if ``background_map`` is not a boolean.
if ``line_color`` is not a str.
if ``line_width`` is not a int or float.
if ``fill_alpha`` is not a int or float.
"""
if not isinstance(background_map, bool):
raise TypeError("background map must be a bool")
if not isinstance(line_color, str):
raise TypeError("line_color must be a str")
if not isinstance(line_width, (int, float)):
raise TypeError("line_width must be a int or float")
if not isinstance(fill_alpha, (int, float)):
raise TypeError("fill_alpha must be a int or float")
us_states_dat = get_state_borders()
if state_list is None:
state_list = set(us_states_dat.keys()) - {"AK", "HI", "DC", "PR"}
all_state_xs, all_state_ys = project_borders(us_states_dat, state_list=state_list)
if background_map:
canvas.add_tile(get_provider(Vendors.CARTODBPOSITRON_RETINA))
canvas.patches(
"xs",
"ys",
source=ColumnDataSource(
{
"xs": all_state_xs,
"ys": all_state_ys,
"state_color": ["white"] * len(state_list),
"state_name": list(state_list),
}
),
fill_color="state_color",
line_color=line_color,
fill_alpha=fill_alpha,
line_width=line_width,
name="states",
)
return canvas
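# Illustrative usage (create_map_canvas comes from postreise.plot.canvas and the
# arguments shown here are hypothetical):
#     canvas = create_map_canvas(400, 400)
#     canvas = add_state_borders(canvas, background_map=True, line_width=2)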
def add_state_tooltips(canvas, tooltip_title, state2label):
"""Add tooltip to states.
    :param bokeh.plotting.figure canvas: canvas.
    :param str tooltip_title: title displayed alongside each state's label in the tooltip.
    :param dict state2label: keys are states abbreviation and values are labels.
    :return: (*bokeh.plotting.figure.Figure*) -- canvas with tooltips.
"""
if not isinstance(tooltip_title, str):
raise TypeError("tooltip title must be a str")
patches = canvas.select_one({"name": "states"})
patches = expand_data_source(patches, state2label, "state_label")
hover = HoverTool(
tooltips=[("State", "@state_name"), (f"{tooltip_title}", "@state_label")],
renderers=[patches],
)
canvas.add_tools(hover)
return canvas
def add_state_colors(canvas, state2color):
"""Color states.
:param bokeh.plotting.figure canvas: canvas.
:param dict state2color: keys are states abbreviation and values are colors.
:return: (*bokeh.plotting.figure.Figure*) -- canvas with colored state.
"""
patches = canvas.select_one({"name": "states"})
patches = expand_data_source(patches, state2color, "state_color")
return canvas
def add_state_legends(
canvas,
state2label=None,
title=None,
location="bottom_right",
title_size="12pt",
label_size="12pt",
):
"""Add legend.
:param bokeh.plotting.figure canvas: canvas.
:param dict state2label: keys are states abbreviation and values are labels.
:param str title: title for legend.
:param str location: legend location on canvas. Default is bottom right.
:param str title_size: legend title font size. Default is 12pt.
:param str label_size: legend labels font size. Default is 12pt.
:return: (*bokeh.plotting.figure.Figure*) -- canvas with colored state.
:raises TypeError:
if ``title`` is not None or str.
if ``location`` is not a str.
if ``title_size`` is not a str.
if ``label_size`` is not a str.
"""
if title is not None and not isinstance(title, str):
raise TypeError("title must be a str")
if location is not None and not isinstance(location, str):
raise TypeError("location must be a str")
if title_size is not None and not isinstance(title_size, str):
raise TypeError("title_size must be a str")
if label_size is not None and not isinstance(label_size, str):
raise TypeError("label_size must be a str")
patches = canvas.select_one({"name": "states"})
patches = expand_data_source(patches, state2label, "state_legend")
group_legend = (
        pd.DataFrame({"legend": patches.data_source.data["state_legend"]})
    )
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 16:00:33 2018
@author: tcmorgan2
Reads in wind speed data from tab delimited text files.
The first portion of the file contains header information.
The second portion contains the average and standard deviation of wind speed over some time interval.
Imported files are passed through an SQLite database for temporary storage and processing.
A netCDF of raw estimated values is saved into the rawWindData folder. This file includes any gaps in data collection.
"""
import pandas as pd
import re
import logging
import sqlite3 as lite
import numpy as np
import os
from netCDF4 import Dataset
from MiGRIDS.InputHandler.processInputDataFrame import processInputDataFrame
#String, String -> dataframe
def readWindData(inputDict):
'''imports all MET data files in a folder and converts parameters to a dataframe.
:param inputDict: [Dictionary] a dictionary containing file location, datetime and channel information
:return: [Dictionary],[pandas.DataFrame] a dictionary of files that were read and the resulting dataframe of values is returned
'''
#DATETIME = 'Date_&_Time_Stamp'
DATETIME = inputDict['dateColumnName']
def readAsHeader(file, header_dict, componentName):
'''extracts the header information from a MET file.
:param file [File] a MET file to be read.
:param header_dict [Dictionary] a dictionary of header information
:param componentName [String] the name of the channel of interest.
:return [Dictionary] of header information for the file.
'''
inline = file.readline().split('\t')
inline = [re.sub(r"\s+", '_', x.strip()) for x in inline] # strip whitespaces at ends and replaces spaces with underscores
#assumes 'Date & Time Stamp' is the first column name where the dataframe starts.
#we return the dictionary of header information
if inline[0] == DATETIME:
names = inline
return header_dict, names
else:
#assumes that header information for channels are prefixed with 'Channel ...'
if inline[0][0:3] == 'Cha':
#start a new component
componentName = 'CH' + inline[1].rstrip()
header_dict[componentName] = {}
if (componentName is not None) & (len(inline) > 1):
header_dict[componentName][inline[0].rstrip()] = inline[1].rstrip()
return readAsHeader(file, header_dict, componentName)
def readAsData(file, names):
        '''reads the data portion of a MET file into a dataframe
:param file [File] the MET file to be read
:param names [ListOf String] the channels to be read.
:return [DataFrame] of values for specified channels with datetime index'''
rowList = []
for line in file:
#these are the data lines with column names
value_dict = {}
cols = line.split('\t')
for i in range(len(names)):
value_dict[names[i]] = cols[i]
rowList.append(value_dict)
filedf = pd.DataFrame(rowList)
return filedf
    #if a new channel specification is encountered within the input files it gets incremented with an appended number
#i.e. Channel 3 was windspeed in input file 1 but in input file 6 it becomes wind direction thus the channel name becomes CH3_1
def channelUp(channel, existing, increment = 1) :
newchannel = channel + '_' + str(increment)
if newchannel not in existing.keys():
return newchannel
else:
increment +=1
return channelUp(channel, existing, increment)
    #checks if the channel input information just read in matches the information stored in the working header dictionary
def checkMatch(channel, h, combinedHeader):
for attribute in combinedHeader[channel].keys():
if combinedHeader[channel][attribute]!= h[channel][attribute]:
return False
return True
#adds a new channel to the combined header dictionary if it doesn't exist yet
def addChannel(channel, h, combinedHeader, oc):
combinedHeader[channel]={'Description':h[oc]['Description'],
'Height':h[oc]['Height'],
'Offset':h[oc]['Offset'],
'Scale_Factor':h[oc]['Scale_Factor'],
'Units':h[oc]['Units']}
return combinedHeader
# a data class for estimating and storing windspeed data collected at intervals
class windRecord():
def __init__(self, sigma=25, mu=250, minws = 0, maxws = 20, datetime = None):
self.sigma = sigma
self.mu = mu
self.distribution = None
self.minws = minws
self.maxws = maxws
self.datetime = datetime
def getDatetime(self):
return self.datetime
#finds the previous value based on timestamp
def getStart(self, duration, df):
#find the wind record immediately prior to current windrecord
previousrecordtime = self.getDatetime() - duration
sorteddf = df.sort_values('time')
myvalue = sorteddf['values'][sorteddf['time'] < previousrecordtime][-1:]
if len(myvalue) > 1:
myvalue = myvalue[0]
elif len(myvalue) == 0:
myvalue = None
return myvalue
#self, integer, numeric,string, integer
def getValues(self, elapsed_time, start,interval, tau = None):
mu = self.mu
sigma = self.sigma
timestep = pd.Timedelta(interval).seconds
#number of records to estimate
n = int(elapsed_time/timestep)
#tau scales the relationship between time and change in value of x
#larger values result in larger drift and diffusion
if tau is None:
tau = n
x = np.zeros(n)
#renormalized variables
sigma_bis = sigma * np.sqrt(2.0 /n)
sqrtdt = np.sqrt(timestep)
x[0] = start
#np.random is the random gaussian with mean 0
for i in range(n-1):
x[i+1] = x[i] + timestep*(-(x[i]-mu)/tau) + sigma_bis * sqrtdt * np.random.randn()
return x
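        # In other words, each step follows a discretised mean-reverting (Ornstein-Uhlenbeck
        # style) update: x[i+1] = x[i] + dt*(mu - x[i])/tau + sigma_bis*sqrt(dt)*N(0,1),
        # so simulated values drift back toward mu with Gaussian noise.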
def estimateDistribution(self, records,interval, start = None, tau = None):
if start is None:
start = self.minws
tau = records
x = self.getValues(records, start, interval, tau)
t = pd.date_range(self.datetime - pd.to_timedelta(pd.Timedelta(interval).seconds * records, unit='s'), periods=records,freq='s')
self.distribution = [x,t]
return
#a dictionary of files that are read
fileDict = {}
df = pd.DataFrame()
for root, dirs, files in os.walk(inputDict['fileLocation']):
for f in files:
with open(os.path.join(root, f), 'r',errors='ignore') as file:
#read the header information of each file
if (file.name)[-3:] == 'txt':
print(os.path.basename(file.name))
data = pd.DataFrame()
headerDict = {}
headerDict, names = readAsHeader(file, headerDict, None)
fileDict[file.name] = headerDict
for n in names:
if n not in df.columns:
df[n] = None
data[n] = None
#read the data from each file
fileData = readAsData(file, names)
df = pd.concat([df, fileData], axis=0, ignore_index=True)
if file.name in fileDict.keys():
fileDict[file.name]['rows'] = len(fileData)
df = df.set_index(pd.to_datetime(df[DATETIME]))
df = df.apply(pd.to_numeric, errors='ignore')
df = df.sort_index()
combinedHeader = {}
fileLog = fileDict
#check that there isn't mismatched header information
for f in fileLog.keys():
h = fileLog[f]
rows = 0
for channel in h.keys():
if channel == 'rows':
rows += h[channel]
else:
if channel in combinedHeader.keys():
#check that the values match
if not checkMatch(channel, h, combinedHeader):
addChannel(channelUp(channel, combinedHeader), h, combinedHeader, channel)
else:
#add the channel
addChannel(channel, h, combinedHeader, channel)
def createNetCDF(df, increment):
# create a netcdf file
dtype = 'float'
# column = df.columns.values[i]
ncName = os.path.join(inputDict['fileLocation'], (str(increment) + 'WS.nc'))
rootgrp = Dataset(ncName, 'w', format='NETCDF4') # create netCDF object
rootgrp.createDimension('time', None) # create dimension for all called time
# create the time variable
rootgrp.createVariable('time', dtype, 'time') # create a var using the varnames
        rootgrp.variables['time'][:] = pd.to_timedelta(pd.Series(df.index)).dt.total_seconds().astype(int).values
# create the value variable
rootgrp.createVariable('value', dtype, 'time') # create a var using the varnames
        rootgrp.variables['value'][:] = np.array(df['values'])  # fill with values
# assign attributes
rootgrp.variables['time'].units = 'seconds' # set unit attribute
rootgrp.variables['value'].units = 'm/s' # set unit attribute
rootgrp.variables['value'].Scale = 1 # set unit attribute
rootgrp.variables['value'].offset = 0 # set unit attribute
# close file
rootgrp.close()
#now we need to fill in the gaps between sampling points
#apply to every row, 10 minutes = 600 seconds
def fillWindRecords(df, channels):
database = os.path.join(inputDict['fileLocation'], 'wind.db')
connection = lite.connect(database)
for k in channels:
logging.info(k)
newdf = df.copy()
newdf = newdf.sort_index()
newColumns = [x.replace(k,'').rstrip() for x in newdf.columns]
newdf.columns = newColumns
valuesdf = pd.DataFrame()
valuesdf['time'] = None
valuesdf['values'] = None
            newdf['date'] = pd.to_datetime(newdf.index)
import numpy as np
import pandas as pd
def _generate_X_train_1():
# Generate X_train dataframe with missing values
# Initialise parameters
n_rows = 10
nominal_categories = {'stanage': 0.3, 'burbage': 0.2, 'almscliff': 0.2, 'froggatt': 0.15, 'blacknor':0.15}
# Generate dataframe
np.random.seed(0)
X_train = pd.DataFrame({
'numeric_1': np.random.choice(100, n_rows),
'numeric_2': np.random.choice(10, n_rows, replace=False),
'numeric_3': np.random.choice(22, n_rows, replace=False),
'numeric_4': np.random.choice(5, n_rows),
'nominal': np.random.choice(list(nominal_categories.keys()), n_rows, replace=True, p=list(nominal_categories.values())),
})
np.random.seed(0)
X_train = X_train.mask((np.random.random(size=X_train.shape) > 0.75))
    # Add boolean column after the mask has been applied (masking would coerce a bool column to float once NaNs appear)
bool_elements = {True: 0.6, False: 0.4}
np.random.seed(0)
bool_list = list(np.random.choice(list(bool_elements.keys()), n_rows, replace=True, p=list(bool_elements.values())))
np.random.seed(0)
mask = list(np.random.random(size=len(bool_list)) > 0.5)
X_train['bool'] = [(x if m else np.nan) for (x, m) in zip(bool_list, mask)]
# Add timestamp column
    timestamps = pd.date_range(start=pd.Timestamp(2019, 1, 6), end=pd.Timestamp(2020, 1, 20), periods=n_rows)
    timestamps = pd.Series(timestamps.values)
# coding: utf-8
# <h1 align="center"> Lending Club Loan Analysis </h1> <br>
# ## Company Information:
# Lending Club is a peer-to-peer lending company based in the United States, in which investors provide funds for potential borrowers and earn a profit depending on the risk they take (the borrower's credit score). Lending Club provides the "bridge" between investors and borrowers. For more basic information, please check out the Wikipedia article about the company. <br><br>
#
#
# <a src="https://en.wikipedia.org/wiki/Lending_Club"> Lending Club Information </a>
#
#
#
#
# ## How Lending Club Works?
# <img src="http://echeck.org/wp-content/uploads/2016/12/Showing-how-the-lending-club-works-and-makes-money-1.png"><br><br>
#
#
# ## Outline: <br><br>
# I. Introduction <br>
# a) [General Information](#general_information)<br>
# b) [Similar Distributions](#similar_distributions)<br><br>
#
# II. <b>Good Loans vs Bad Loans</b><br>
# a) [Types of Loans](#types_of_loans)<br>
# b) [Loans issued by Region](#by_region)<br>
# c) [A Deeper Look into Bad Loans](#deeper_bad_loans)<br><br>
#
# III. <b>The Business Perspective</b><br>
# a) [Understanding the Operative side of Business](#operative_side)<br>
# b) [Analysis by Income Category](#income_category) <br><br>
#
# IV. <b>Assessing Risks</b><br>
# a) [Understanding the Risky Side of Business](#risky_side)<br>
# b) [The importance of Credit Scores](#credit_scores)<br>
# c) [What determines a bad loan](#determines_bad_loan)<br>
# d) [Defaulted Loans](#defaulted_loans)
#
# ## References:
# 1) <a src="https://www.kaggle.com/arthurtok/global-religion-1945-2010-plotly-pandas-visuals"> Global Religion 1945-2010: Plotly & Pandas visuals</a> by Anisotropic <br>
# 2) <a src="https://www.kaggle.com/vigilanf/loan-metrics-by-state"> Loan Metrics By State </a> by <NAME><br>
# 3) Hands on Machine Learning by <NAME> <br>
# 4) <a src="https://www.youtube.com/watch?v=oYbVFhK_olY&list=PLSPWNkAMSvv5DKeSVDbEbUKSsK4Z-GgiP"> Deep Learning with Neural Networks and TensorFlow </a> by Sentdex
# # Introduction:
# ## General Information:
# <a id="general_information"></a>
# In[ ]:
# Import our libraries we are going to use for our data analysis.
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# Plotly visualizations
from plotly import tools
import plotly.plotly as py
import plotly.figure_factory as ff
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
# plotly.tools.set_credentials_file(username='AlexanderBach', api_key='o4fx6i1MtEIJQxfWYvU1')
get_ipython().run_line_magic('matplotlib', 'inline')
df = pd.read_csv('../input/loan.csv', low_memory=False)
# Copy of the dataframe
original_df = df.copy()
df.head()
# In[ ]:
df.info()
# In[ ]:
# Replace the name of some columns
df = df.rename(columns={"loan_amnt": "loan_amount", "funded_amnt": "funded_amount", "funded_amnt_inv": "investor_funds",
"int_rate": "interest_rate", "annual_inc": "annual_income"})
# Drop irrelevant columns
df.drop(['id', 'member_id', 'emp_title', 'url', 'desc', 'zip_code', 'title'], axis=1, inplace=True)
# ## Similar Distributions:
# <a id="similar_distributions"></a>
# We will start by exploring the distribution of the loan amounts and see when the amount of loans issued increased significantly. <br>
#
# <h4> What we need to know: </h4> <br>
# <ul>
# <li> Understand what amount was <b>mostly issued</b> to borrowers. </li>
# <li> Which <b>year</b> issued the most loans. </li>
# <li> The distribution of loan amounts is a <b>multinomial distribution </b>.</li>
# </ul>
#
#
#
# <h4> Summary: </h4><br>
# <ul>
# <li> Most of the <b>loans issued</b> were in the range of 10,000 to 20,000 USD. </li>
# <li> The <b>year of 2015</b> was the year were most loans were issued.</li>
#    <li> Loans were issued in an <b>incremental manner</b>. (Possibly due to a recovery in the U.S. economy) </li>
#    <li> The loans <b>applied for</b> by potential borrowers, the amount <b>issued</b> to the borrowers and the amount <b>funded</b> by investors are similarly distributed, <b>meaning</b> that it is most likely that qualified borrowers are going to get the loan they applied for. </li>
#
# </ul>
#
#
#
#
# In[ ]:
fig, ax = plt.subplots(1, 3, figsize=(16,5))
loan_amount = df["loan_amount"].values
funded_amount = df["funded_amount"].values
investor_funds = df["investor_funds"].values
sns.distplot(loan_amount, ax=ax[0], color="#F7522F")
ax[0].set_title("Loan Applied by the Borrower", fontsize=14)
sns.distplot(funded_amount, ax=ax[1], color="#2F8FF7")
ax[1].set_title("Amount Funded by the Lender", fontsize=14)
sns.distplot(investor_funds, ax=ax[2], color="#2EAD46")
ax[2].set_title("Total committed by Investors", fontsize=14)
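# In[ ]:
# (Added for illustration) A quick numeric summary of the three amount columns backs up
# the "similar distributions" observation above.
df[['loan_amount', 'funded_amount', 'investor_funds']].describe()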
# In[ ]:
# Let's transform the issue dates by year.
df['issue_d'].head()
dt_series = pd.to_datetime(df['issue_d'])
import os
import nltk
import requests
from flask import Flask, request
import datetime as dt
import time
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import numpy as np
from models import db, StateModel
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from constants import LIST_OF_STATES
from searchtweets import ResultStream, gen_request_parameters, load_credentials, collect_results
import pandas as pd
nltk.download('vader_lexicon')
vader = SentimentIntensityAnalyzer()
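# Illustrative example of how the analyzer is typically used (scores shown are placeholders,
# not exact values):
#   vader.polarity_scores("great game tonight") -> {'neg': 0.0, 'neu': ..., 'pos': ..., 'compound': ...}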
app = Flask(__name__, static_folder='../build', static_url_path='/')
# TODO: This should be an environment variable
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('SQLALCHEMY_DATABASE_URI')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
migrate = Migrate(app, db)
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/api/time')
def get_current_time():
return {'time': time.ctime()}
def fetch_new_tweets():
# TODO: Allow this to look for env variables
#
search_args = load_credentials(
"./.twitter_keys.yaml", yaml_key="search_tweets_v2", env_overwrite=False)
resp_dict = dict()
for state in LIST_OF_STATES:
qry_str = "entity:\"" + state + "\" -is:retweet lang:en -has:links -has:mentions -context:11.689566306014617600 -context:11.706083902411055104 -context:11.769193663230468096"
query = gen_request_parameters(qry_str, results_per_call=100)
try:
tweets = collect_results(query, max_tweets=200,
result_stream_args=search_args)
            df = pd.DataFrame.from_dict(tweets)
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
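# (For example, F1_31.DBF holds the FERC Form 1 fuel table and maps to 'f1_fuel' below.)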
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
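# For example, a numeric DBF field of type 'N' maps to sa.Float below, so a column can be
# built roughly as sa.Column(field_name, dbf_typemap[field_type]) when constructing tables.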
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA Form 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923:
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used to indicate where a plant is located.
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: for example, we need to
# harvest the plant locations before harvesting the locations of the utilities.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems",)
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary mapping aggregated region names (keys) to lists of the EPA
IPM region abbreviations they contain (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and
dictionaries mapping the original column names to their replacement names
(values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the glue tables that link plant and utility records across
FERC and EIA.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of partitions (e.g. years, year-months, states) we currently
# expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping
partition names (sub-keys) to the partitions (sub-values), such as tuples of
years, that can currently be ingested into PUDL for each data source.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the tables from each data
source that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary mapping table names (keys) to tuples of integer-type column
names whose null values need fixing (values).
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys)
and their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
'steam', 'cems', 'continuous emissions monitoring system', 'hourly'
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? Removed from the DG table because it is not a real component.
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# TODO: Are fuel_type and fuel_type_code the same?
# fuel_type includes 40 code-like values (e.g. WAT, SUN, NUC).
'fuel_type': pd.StringDtype(),
# From the boiler_fuel_eia923 table; there are 30 code-like values (e.g. NG, BIT, LIG).
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double-check this for early EIA-860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# We should transition these into readable codes, not single-letter values.
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'service_area': pd.StringDtype(),
'service_type': pd.CategoricalDtype(categories=[
"bundled", "energy", "delivery",
]),
'short_form': pd.BooleanDtype(),
'sold_to_utility_mwh': float,
'solid_fuel_gasification': pd.BooleanDtype(),
'data_source': pd.StringDtype(),
'standard': pd.CategoricalDtype(categories=RELIABILITY_STANDARDS),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'storage_capacity_mw': float,
'storage_customers': pd.Int64Dtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
'summer_capacity_estimate': pd.BooleanDtype(),
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'summer_peak_demand_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': pd.StringDtype(),
'switch_oil_gas': pd.BooleanDtype(),
'syncronized_transmission_grid': pd.BooleanDtype(),
# Added by AES for NM & DG tech table (might want to consider merging with another fuel label)
'tech_class': pd.CategoricalDtype(categories=TECH_CLASSES),
'technology_description': pd.StringDtype(),
'time_cold_shutdown_full_load_code': pd.StringDtype(),
'time_of_use_pricing_program': pd.BooleanDtype(),
'time_responsive_programs': pd.BooleanDtype(),
'time_responsiveness_customers': pd.Int64Dtype(),
'timezone': pd.StringDtype(),
'topping_bottoming_code': pd.StringDtype(),
'total': float,
'total_capacity_less_1_mw': float,
'total_meters': pd.Int64Dtype(),
'total_disposition_mwh': float,
'total_energy_losses_mwh': float,
'total_sources_mwh': float,
'transmission': float,
'transmission_activity': pd.BooleanDtype(),
'transmission_by_other_losses_mwh': float,
'transmission_distribution_owner_id': pd.Int64Dtype(),
'transmission_distribution_owner_name': pd.StringDtype(),
'transmission_distribution_owner_state': pd.StringDtype(),
'turbines_inverters_hydrokinetics': float,
'turbines_num': pd.Int64Dtype(), # TODO: check if any turbines show up pre-2016
'ultrasupercritical_tech': pd.BooleanDtype(),
'unbundled_revenues': float,
'unit_id_eia': pd.StringDtype(),
'unit_id_pudl': pd.Int64Dtype(),
'uprate_derate_completed_date': 'datetime64[ns]',
'uprate_derate_during_year': pd.BooleanDtype(),
'utility_id_eia': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'utility_name_eia': pd.StringDtype(),
'utility_owned_capacity_mw': float, # Added by AES for NNM table
'variable_peak_pricing_program': pd.BooleanDtype(),
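
A minimal sketch (assumed, not taken from the source) of how a dtype dictionary like the one above is typically applied when loading or casting a table; the `column_dtypes` name, the CSV path, and the column subset chosen here are illustrative only.

import pandas as pd

# Hypothetical subset of the mapping above.
column_dtypes = {
    'plant_id_eia': pd.Int64Dtype(),
    'plant_name_eia': pd.StringDtype(),
    'capacity_mw': float,
    'county_id_fips': pd.StringDtype(),  # keeps leading zeroes
    'report_date': 'datetime64[ns]',
}

# read_csv generally rejects datetime dtypes in `dtype`, so date columns are
# routed through parse_dates instead.
date_cols = [c for c, d in column_dtypes.items() if d == 'datetime64[ns]']
other_dtypes = {c: d for c, d in column_dtypes.items() if c not in date_cols}

df = pd.read_csv('plants.csv', dtype=other_dtypes, parse_dates=date_cols)

# Alternatively, cast an already-loaded DataFrame in one step:
# df = df.astype(column_dtypes)
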
#------------------------------#
#  Working with large amounts  #
#  of data                     #
#------------------------------#
import pandas as pd

df = pd.read_csv('C:/src/learn-pandas/pandas code/pokemon_data.csv')

# Accumulate per-chunk results into an empty frame with the same columns.
new_df = pd.DataFrame(columns=df.columns)

# Read the modified file 5 rows at a time and count each chunk by 'Type 1'.
for chunk in pd.read_csv('C:/src/learn-pandas/pandas code/modified.csv', chunksize=5):
    results = chunk.groupby(['Type 1']).count()
    new_df = pd.concat([new_df, results])
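
# A possible follow-up (assumed, not part of the original snippet): after the
# loop, new_df holds one row per ('Type 1', chunk) pair; sorting by index makes
# the accumulated per-chunk counts easier to scan.
print(new_df.sort_index().head(10))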