| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
---|---|---|
import os, sys, re, getopt, functools, pysam
import pandas as pd
import numpy as np
from plotnine import *
from PIL import Image
from ATACFragQC import __version__
class ArgumentList:
file_bam = ''
file_ref = ''
file_out = False
quality = 50
isize = 147
cn_len = 10
chr_filter = ''
chr_list = ''
pic_list = 'a,b,c'
def __init__(self):
self.file_bam = ''
self.file_ref = ''
self.file_out = False
self.quality = 50
self.isize = 147
self.cn_len = 10
self.chr_filter = ''
self.chr_list = ''
self.pic_list = 'a,b,c'
def chr_cmp(a, b):
sa = str(a)
sb = str(b)
la = len(sa)
lb = len(sb)
lm = min(la, lb)
for i in range(0, lm):
if sa[i] != sb[i]:
oa = ord(sa[i]) if sa[i] != 'M' and sa[i] != 'm' else 0x7A
ob = ord(sb[i]) if sb[i] != 'M' and sb[i] != 'm' else 0x7A
if oa < 0x3A and oa > 0x2F and ob < 0x3A and ob > 0x2F and la != lb:
return la - lb
cd = oa - ob
return cd
return la - lb
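# Illustrative sketch (not part of the original tool): chr_cmp is meant to be used with
# functools.cmp_to_key so that purely numeric chromosome names compare by length first
# ('chr2' before 'chr10') and mitochondrial names containing 'M'/'m' sort last.
def _example_chr_sort():
    """Hedged demo of the intended ordering; the chromosome names here are made up."""
    names = ['chr10', 'chr2', 'chrM', 'chr1']
    return sorted(names, key=functools.cmp_to_key(chr_cmp))
    # expected: ['chr1', 'chr2', 'chr10', 'chrM']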
def bedScan(args):
(pathname, extension) = os.path.splitext(args.file_bam)
(filepath, filename) = os.path.split(pathname)
if not os.path.isfile(args.file_bam+'.bai'):
print('There is no index file for the bam...')
return
print('Processing the reference...')
ref_raw = pd.read_table(args.file_ref, comment='#', header=None, dtype={0:str})
ref_raw.columns = ['seq_id', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes']
type_list = list(set(ref_raw['type']))
type_test = 'transcript' if 'transcript' in type_list else 'gene'
if not type_test in type_list:
print('There is no suitable term in the gtf to confirm TSSs...')
return
chr_list = list(set(ref_raw['seq_id']))
if args.chr_list != '':
chr_list = list(set(args.chr_list.split(',')).intersection(set(chr_list)))
if args.cn_len > 0:
chr_list = [x for x in chr_list if len(x) < min(args.cn_len, min(list(map(lambda x: len(x), chr_list)))*4)]
if len(chr_list) == 0:
print('There is no chromosome to be calculated...')
return
fs = pysam.AlignmentFile(args.file_bam, 'rb')
chr_detect = []
fs_header = fs.header.to_dict()
if 'SQ' in fs_header.keys():
for term in fs_header['SQ']:
if 'SN' in term.keys():
chr_detect.append(str(term['SN']))
if len(chr_detect) > 0:
chr_list = list(set(chr_list).intersection(set(chr_detect)))
if len(chr_list) == 0:
print('There is no chromosome to be calculated...')
return
chr_list = sorted(list(set(chr_list).difference(set(args.chr_filter.split(',')))), key=functools.cmp_to_key(chr_cmp))
chr_list_frag = [x for x in chr_list if re.match(r'.*[Mm]+,*', x) == None]
ref = ref_raw[(ref_raw['type'] == type_test) & (ref_raw['strand'] != '-') & ref_raw['seq_id'].str.match('^'+chr_list_frag[0]+'$') & (ref_raw['start'] > 1000)]
for term in chr_list_frag[1:]:
ref = ref.append(ref_raw[(ref_raw['type'] == type_test) & (ref_raw['strand'] != '-') & ref_raw['seq_id'].str.match('^'+term+'$') & (ref_raw['start'] > 1000)])
ref = ref[['seq_id', 'strand', 'start', 'end']].sort_values(by=['seq_id', 'start', 'strand'])
ref = ref.drop_duplicates(subset=['seq_id', 'start', 'strand'], keep='first')
ref.loc[ref['strand'] == '+', 'start'] -= 1000
ref.loc[ref['strand'] == '+', 'end'] = ref.loc[ref['strand'] == '+', 'start'] + 2000
ref.index = list(range(ref.index.size))
print('Scanning the distribution of fragments...')
chr_count = {}
len_count = [0] * 501
for chr in chr_list:
count = 0
for read in fs.fetch(chr):
if read.flag == 99 and read.mapq > args.quality and read.isize < 501:
len_count[read.isize] += 1
count += 1
chr_count[chr] = count
len_count = pd.DataFrame({'V1': list(range(1, 501)), 'V2': len_count[1:]})
chr_count = pd.DataFrame({'V1': list(chr_count.keys()), 'V2': list(chr_count.values())})
chr_count['V1'] = pd.Categorical(chr_count['V1'], categories=chr_list, ordered=True)
print('Scanning the fragments around TSSs...')
dist_count = []
for index, row in ref.iterrows():
count = np.zeros(2001, dtype=np.int64)
for frag in fs.fetch(row['seq_id'], row['start'], row['end']):
if frag.flag == 99 and frag.mapq > args.quality and frag.isize < args.isize:
count[range(max(0, frag.pos - row['start']), min(2001, frag.pos + frag.isize - row['start']))] += 1
if sum(count) > 0:
dist_count.append(count)
dist_count = np.array(dist_count)
if dist_count.size > 0:
factors = np.mean(dist_count[:, list(range(0,100))+list(range(1901,2001))], axis=1)
factors[factors == 0] = np.mean(factors)
dist_count = pd.DataFrame({'V1': list(range(-900, 901)), 'V2': list(np.mean(dist_count[:, 100:1901] / factors.reshape(len(factors), 1), axis=0))})
print('Saving the results...')
if args.file_out:
chr_count.to_csv(pathname+'_chr.tsv', sep='\t', index=False, header=False)
len_count.to_csv(pathname+'_fl.tsv', sep='\t', index=False, header=False)
if dist_count.size > 0:
dist_count.to_csv(pathname+'_tss.tsv', sep='\t', index=False, header=False)
| pd.DataFrame({'V1': factors}) | pandas.DataFrame |
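# Hedged, standalone sketch of the flank-normalization step in the script above (made-up data):
# each TSS gets a 2001-bp coverage vector; the mean of the two outer 100-bp flanks serves as
# that TSS's background factor before the central 1801 bp are averaged across all TSSs.
import numpy as np

def _example_tss_normalization(dist_count: np.ndarray) -> np.ndarray:
    flanks = np.concatenate([dist_count[:, :100], dist_count[:, 1901:]], axis=1)
    factors = flanks.mean(axis=1)
    factors[factors == 0] = factors.mean()  # avoid dividing by zero, as in the script
    return (dist_count[:, 100:1901] / factors[:, None]).mean(axis=0)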
import pandas as pd
import numpy as np
np.random.seed(99)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
from sklearn.multiclass import OneVsRestClassifier
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgbm
from sklearn.model_selection import KFold, cross_val_score,StratifiedKFold
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
import csv
import re
from xgboost import XGBRegressor, XGBClassifier
from sklearn.metrics import mean_squared_log_error, mean_squared_error,balanced_accuracy_score
from scipy import stats
from sklearn.model_selection import RandomizedSearchCV
import scipy as sp
import time
import copy
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from collections import Counter, defaultdict
import pdb
################# All these imports are needed for the pipeline #######
import time
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.base import BaseEstimator, TransformerMixin #gives fit_transform method for free
import pdb
from sklearn.base import TransformerMixin
from collections import defaultdict
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import FunctionTransformer
###################################################################################################
############# This is where you import from other Auto_TS modules ############
from ..utils import My_LabelEncoder, My_LabelEncoder_Pipe
from ..utils import left_subtract
#################################################################################
def complex_XGBoost_model(X_train, y_train, X_test, log_y=False, GPU_flag=False,
scaler = '', enc_method='label', n_splits=5,
num_boost_round=1000, verbose=-1):
"""
This model is called complex because it handles multi-label, multi-class datasets which XGBoost ordinarily can't.
Just send in X_train, y_train and what you want to predict, X_test
It will automatically split X_train into multiple folds (10) and train and predict each time on X_test.
It will then use average (or use mode) to combine the results and give you a y_test.
It will automatically detect modeltype as "Regression" or 'Classification'
It will also add MultiOutputClassifier and MultiOutputRegressor to multi_label problems.
The underlying estimator in all cases is XGB, so you get the best of both worlds.
Inputs:
------------
X_train: pandas dataframe only: do not send in numpy arrays. This is the X_train of your dataset.
y_train: pandas Series or DataFrame only: do not send in numpy arrays. This is the y_train of your dataset.
X_test: pandas dataframe only: do not send in numpy arrays. This is the X_test of your dataset.
log_y: default = False: If True, it means use the log of the target variable "y" to train and test.
GPU_flag: if your machine has a GPU set this flag and it will use XGBoost GPU to speed up processing.
scaler : default is StandardScaler(). But you can send in MinMaxScaler() as input to change it or any other scaler.
enc_method: default is 'label' encoding. But you can choose 'glmm' as an alternative. But those are the only two.
verbose: default = 0. Choosing 1 will give you a lot more output.
Outputs:
------------
y_preds: Predicted values for your X_XGB_test dataframe.
It has been averaged after repeatedly predicting on X_XGB_test. So likely to be better than one model.
"""
X_XGB = copy.deepcopy(X_train)
Y_XGB = copy.deepcopy(y_train)
X_XGB_test = copy.deepcopy(X_test)
####################################
start_time = time.time()
top_num = 10
if isinstance(Y_XGB, pd.Series):
targets = [Y_XGB.name]
else:
targets = Y_XGB.columns.tolist()
if len(targets) == 1:
multi_label = False
if isinstance(Y_XGB, pd.DataFrame):
Y_XGB = pd.Series(Y_XGB.values.ravel(),name=targets[0], index=Y_XGB.index)
else:
multi_label = True
modeltype, _ = analyze_problem_type(Y_XGB, targets)
columns = X_XGB.columns
##### Now continue with scaler pre-processing ###########
if isinstance(scaler, str):
if not scaler == '':
scaler = scaler.lower()
if scaler == 'standard':
scaler = StandardScaler()
elif scaler == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
######### G P U P R O C E S S I N G B E G I N S ############
###### This is where we set the CPU and GPU parameters for XGBoost
if GPU_flag:
GPU_exists = check_if_GPU_exists()
else:
GPU_exists = False
##### Set the Scoring Parameters here based on each model and preferences of user ###
cpu_params = {}
param = {}
cpu_params['tree_method'] = 'hist'
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
if GPU_exists:
param['tree_method'] = 'gpu_hist'
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
print(' Hyper Param Tuning XGBoost with GPU parameters. This will take time. Please be patient...')
else:
param = copy.deepcopy(cpu_params)
print(' Hyper Param Tuning XGBoost with CPU parameters. This will take time. Please be patient...')
#################################################################################
if modeltype == 'Regression':
if log_y:
Y_XGB.loc[Y_XGB==0] = 1e-15 ### just set something that is zero to a very small number
######### Now set the number of rows we need to tune hyper params ###
scoreFunction = { "precision": "precision_weighted","recall": "recall_weighted"}
random_search_flag = True
#### We need a small validation data set for hyper-param tuning #########################
hyper_frac = 0.2
#### now select a random sample from X_XGB ##
if modeltype == 'Regression':
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999)
else:
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999, stratify = Y_XGB)
###### This step is needed for making sure y is transformed to log_y ####################
if modeltype == 'Regression' and log_y:
Y_train = np.log(Y_train)
Y_valid = np.log(Y_valid)
#### First convert test data into numeric using train data ###
X_train, Y_train, X_valid, Y_valid, scaler = data_transform(X_train, Y_train, X_valid, Y_valid,
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
###### Time to hyper-param tune model using randomizedsearchcv and partial train data #########
num_boost_round = xgbm_model_fit(random_search_flag, X_train, Y_train, X_valid, Y_valid, modeltype,
multi_label, log_y, num_boost_round=num_boost_round)
#### First convert test data into numeric using train data ###############################
if not isinstance(X_XGB_test, str):
x_train, y_train, x_test, _, _ = data_transform(X_XGB, Y_XGB, X_XGB_test, "",
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
###### Time to train the hyper-tuned model on full train data ##########################
random_search_flag = False
model = xgbm_model_fit(random_search_flag, x_train, y_train, x_test, "", modeltype,
multi_label, log_y, num_boost_round=num_boost_round)
############# Time to get feature importances based on full train data ################
if multi_label:
for i,target_name in enumerate(targets):
each_model = model.estimators_[i]
imp_feats = dict(zip(x_train.columns, each_model.feature_importances_))
importances = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].values
important_features = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()
print('Top 10 features for {}: {}'.format(target_name, important_features))
else:
imp_feats = model.get_score(fmap='', importance_type='gain')
importances = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].values
important_features = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()
print('Top 10 features:\n%s' %important_features[:top_num])
####### order this in the same order in which they were collected ######
feature_importances = pd.DataFrame(importances,
index = important_features,
columns=['importance'])
###### Time to consolidate the predictions on test data ################################
if not multi_label and not isinstance(X_XGB_test, str):
x_test = xgb.DMatrix(x_test)
if isinstance(X_XGB_test, str):
print('No predictions since X_XGB_test is empty string. Returning...')
return {}
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
if log_y:
pred_xgbs = np.exp(model.predict(x_test))
else:
pred_xgbs = model.predict(x_test)
#### if there is no test data just return empty strings ###
else:
pred_xgbs = []
else:
if multi_label:
pred_xgbs = model.predict(x_test)
pred_probas = model.predict_proba(x_test)
else:
pred_probas = model.predict(x_test)
if modeltype =='Multi_Classification':
pred_xgbs = pred_probas.argmax(axis=1)
else:
pred_xgbs = (pred_probas>0.5).astype(int)
##### once the entire model is trained on full train data ##################
print(' Time taken for training XGBoost on entire train data (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
if multi_label:
for i,target_name in enumerate(targets):
each_model = model.estimators_[i]
xgb.plot_importance(each_model, importance_type='gain', title='XGBoost model feature importances for %s' %target_name)
else:
xgb.plot_importance(model, importance_type='gain', title='XGBoost final model feature importances')
print('Returning the following:')
print(' Model = %s' %model)
print(' Scaler = %s' %scaler)
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
print(' (3) sample predictions:%s' %pred_xgbs[:3])
return (pred_xgbs, scaler, model)
else:
if not isinstance(X_XGB_test, str):
print(' (3) sample predictions (may need to be transformed to original labels):%s' %pred_xgbs[:3])
print(' (3) sample predicted probabilities:%s' %pred_probas[:3])
return (pred_xgbs, scaler, model)
##############################################################################################
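# Hedged usage sketch for complex_XGBoost_model (illustrative only; 'train_df', 'test_df'
# and 'target' are hypothetical names, not part of this module):
def _example_complex_xgboost_call(train_df, test_df, target='target'):
    """Show the calling convention described in the docstring above."""
    preds, fitted_scaler, fitted_model = complex_XGBoost_model(
        X_train=train_df.drop(columns=[target]),
        y_train=train_df[target],
        X_test=test_df,
        log_y=False, GPU_flag=False, scaler='standard',
        enc_method='label', num_boost_round=1000)
    return preds, fitted_scaler, fitted_model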
import xgboost as xgb
def xgbm_model_fit(random_search_flag, x_train, y_train, x_test, y_test, modeltype,
multi_label, log_y, num_boost_round=100):
start_time = time.time()
if multi_label and not random_search_flag:
model = num_boost_round
else:
rand_params = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 100),
'n_estimators': sp.stats.randint(100,500),
"max_depth": sp.stats.randint(3, 15),
}
if modeltype == 'Regression':
objective = 'reg:squarederror'
eval_metric = 'rmse'
shuffle = False
stratified = False
num_class = 0
score_name = 'Score'
scale_pos_weight = 1
else:
if modeltype =='Binary_Classification':
objective='binary:logistic'
eval_metric = 'error' ## don't change this to auc or aucpr, since that breaks the feature-importance lookup later
shuffle = True
stratified = True
num_class = 1
score_name = 'Error Rate'
scale_pos_weight = get_scale_pos_weight(y_train)
else:
objective = 'multi:softprob'
eval_metric = 'merror' ## don't change this to auc or aucpr, since that breaks the feature-importance lookup later
shuffle = True
stratified = True
if multi_label:
num_class = y_train.nunique().max()
else:
if isinstance(y_train, np.ndarray):
num_class = np.unique(y_train).max() + 1
elif isinstance(y_train, pd.Series):
num_class = y_train.nunique()
else:
num_class = y_train.nunique().max()
score_name = 'Multiclass Error Rate'
scale_pos_weight = 1 ### use sample_weights in multi-class settings ##
######################################################
final_params = {
'booster' :'gbtree',
'colsample_bytree': 0.5,
'alpha': 0.015,
'gamma': 4,
'learning_rate': 0.01,
'max_depth': 8,
'min_child_weight': 2,
'reg_lambda': 0.5,
'subsample': 0.7,
'random_state': 99,
'objective': objective,
'eval_metric': eval_metric,
'verbosity': 0,
'n_jobs': -1,
'scale_pos_weight':scale_pos_weight,
'num_class': num_class,
'silent': True
}
####### This is where we split into single and multi label ############
if multi_label:
###### This is for Multi_Label problems ############
rand_params = {'estimator__learning_rate':[0.1, 0.5, 0.01, 0.05],
'estimator__n_estimators':[50, 100, 150, 200, 250],
'estimator__gamma':[2, 4, 8, 16, 32],
'estimator__max_depth':[3, 5, 8, 12],
}
if random_search_flag:
if modeltype == 'Regression':
clf = XGBRegressor(n_jobs=-1, random_state=999, max_depth=6)
clf.set_params(**final_params)
model = MultiOutputRegressor(clf, n_jobs=-1)
else:
clf = XGBClassifier(n_jobs=-1, random_state=999, max_depth=6)
clf.set_params(**final_params)
model = MultiOutputClassifier(clf, n_jobs=-1)
if modeltype == 'Regression':
scoring = 'neg_mean_squared_error'
else:
scoring = 'precision'
model = RandomizedSearchCV(model,
param_distributions = rand_params,
n_iter = 15,
return_train_score = True,
random_state = 99,
n_jobs=-1,
cv = 3,
refit=True,
scoring = scoring,
verbose = False)
model.fit(x_train, y_train)
print('Time taken for Hyper Param tuning of multi_label XGBoost (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
cv_results = | pd.DataFrame(model.cv_results_) | pandas.DataFrame |
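# Hedged follow-up sketch: once RandomizedSearchCV has been fitted (as above), its
# cv_results_ attribute can be tabulated to compare the sampled parameter sets.
import pandas as pd

def _summarize_search(search):
    """Return the best-scoring parameter combinations from a fitted search object."""
    results = pd.DataFrame(search.cv_results_)
    cols = ['mean_test_score', 'std_test_score', 'params']
    return results.sort_values('mean_test_score', ascending=False)[cols].head()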
import pandas as pd
sec_file = 'uniprot_sec_ac.txt'
lines = open(sec_file, 'rt').readlines()
for i, l in enumerate(lines):
if l.startswith('Secondary AC'):
entry_lines = lines[i+2:]
sec_id = []
prim_id = []
for l in entry_lines:
s, p = l.split()
sec_id.append(s)
prim_id.append(p)
d = {'Secondary_ID': sec_id, 'Primary_ID': prim_id}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
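# Hedged follow-up sketch (assumed usage, not shown in the original snippet): the
# secondary-to-primary mapping is typically consumed as a plain lookup dictionary.
def _build_accession_lookup(mapping_df):
    """Map each secondary UniProt accession to its primary accession."""
    return dict(zip(mapping_df['Secondary_ID'], mapping_df['Primary_ID']))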
import json
import numpy as np
import pandas as pd
import os
def scan(file_path):
for file in os.listdir(file_path):
file_real = file_path + "/" + file
if os.path.isdir(file_real):
scan(file_real)
else:
if file_real.endswith("json"):
file_handle(file_real)
def file_handle(file):
file_path, file_name = os.path.split(file)
data_path = file_path + "/excel数据/"
if not os.path.exists(data_path):
os.mkdir(data_path)
with open(file, 'rb') as f:
data = f.readlines()[0]
res_data = json.loads(data.decode('utf-8').strip('\ufeff')).get('data')
pd_spr = | pd.DataFrame(res_data) | pandas.DataFrame |
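# Hedged sketch of the likely next step (the original function is truncated here): write the
# parsed frame into the "excel数据" folder created above. The output filename is an assumption.
def _example_write_excel(pd_spr, data_path, file_name):
    pd_spr.to_excel(data_path + file_name.replace('.json', '.xlsx'), index=False)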
import pandas as pd
import os
import re
import numpy as np
import argparse
def get_args_from_command_line():
"""Parse the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--country_code", type=str,
default="US")
parser.add_argument("--method", type=str)
parser.add_argument("--threshold", type=float,
default=0.95)
parser.add_argument("--topk", type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args_from_command_line()
rank_threshold_dict = {
'is_hired_1mo': {
0: 95249,
1: 31756,
2: 4819,
3: 23401,
4: 3834},
'is_unemployed': {
0: 2177012,
1: 4260,
2: 5719,
3: 108568,
4: 26946},
'job_offer': {
0: 3101900,
1: 1235111,
2: 562596,
3: 523967,
4: 1258549},
'job_search': {
0: 1501613,
1: 136421,
2: 205400,
3: 36363, 4: 456326},
'lost_job_1mo': {
0: 89397,
1: 1,
2: 130115,
3: 11613,
4: 0}}
list_folder_dict = {
'US': ['iter_0-convbert-969622-evaluation', 'iter_1-convbert-3050798-evaluation',
'iter_2-convbert-3134867-evaluation', 'iter_3-convbert-3174249-evaluation',
'iter_4-convbert-3297962-evaluation']}
country_code = args.country_code
data_path = f'/home/manuto/Documents/world_bank/bert_twitter_labor/twitter-labor-data/data/active_learning/evaluation_inference/{country_code}'
results_dict = dict()
for inference_folder in list_folder_dict[country_code]:
for label in ['is_hired_1mo', 'lost_job_1mo', 'job_search', 'is_unemployed', 'job_offer']:
if label not in results_dict.keys():
results_dict[label] = dict()
csv_path = os.path.join(data_path, inference_folder, f'{label}.csv')
iter_number = int(re.findall('iter_(\d)', inference_folder)[0])
df = pd.read_csv(csv_path)
results_dict[label][f'iter_{iter_number}'] = dict()
# compute and save metrics
precision_top20 = df[:20]['class'].value_counts(dropna=False, normalize=True)[1]
if args.method == 'topk':
df_top_T = df.loc[df['rank'] < args.topk]
elif args.method == 'threshold':
df_top_T = df.loc[df['rank'] < rank_threshold_dict[label][iter_number]]
if df_top_T.shape[0] < 2:
precision_top_T = np.nan
else:
precision_top_T = df_top_T['class'].value_counts(dropna=False, normalize=True)[1]
results_dict[label][f'iter_{iter_number}']['precision_top20'] = precision_top20
results_dict[label][f'iter_{iter_number}']['precision_top_T'] = precision_top_T
# print(pd.DataFrame.from_dict(results_dict).T)
# organize results
results_df = | pd.DataFrame.from_dict(results_dict) | pandas.DataFrame.from_dict |
import sys
sys.path.append('gen')
from collections import defaultdict
from pathlib import Path
import argparse
import datetime
import locale
import logging
from dash import Dash, dcc, html
from sqlitedict import SqliteDict
import grpc
import pandas as pd
from gen import users_pb2
from models import constants as cnst
from models import currency, instruments, operations, positions
from models import positions as pstns
from models import prices, stats
from models.base_classes import ApiContext, Currency
from models.operations import Operation
from views.plots import Plot
from views.tables import Table
DB_NAME = 'my_db.sqlite'
TOKEN = Path('.token').read_text()
locale.setlocale(locale.LC_ALL, ('RU', 'UTF8'))
pd.options.display.float_format = '{:,.2f}'.format
| pd.set_option('display.max_rows', None) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 12 16:44:53 2018.
@author: dmitriy
"""
import datetime as dt
import os
import time as t
from datetime import datetime
from typing import Any, List, Tuple, Iterable
import pandas as pd
import psycopg2
import requests
# import all the necessary libraries
from bs4 import BeautifulSoup
from dotenv import find_dotenv, load_dotenv
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options as FirefoxOptions
load_dotenv()
def catch_error(something: str) -> str:
"""Try catch an exception.
__Attributes__
something: Where we will search for Exception.
__Returns__
something if not error or "" if error is.
"""
try:
something
except IndexError:
something = ""
print("An Index Error!")
return something
else:
# print(something)
return something
def create_conn() -> Tuple[Any, Any]:
"""Create connection to PostgreSQL DB."""
DATABASE_URL = os.environ["DATABASE_URL"]
conn = psycopg2.connect(DATABASE_URL)
cursor = conn.cursor()
return conn, cursor
def create_table(conn, cursor) -> None:
"""Create a table if not exists in DB."""
sql_query = """CREATE TABLE IF NOT EXISTS messages
(
id serial NOT NULL,
title character varying(255),
date4 date,
time3 character varying(50),
author character varying(50),
counts int,
sometext character varying(512),
category character varying(50),
date2 character varying(255),
CONSTRAINT persons_pkey PRIMARY KEY (id)
)"""
cursor.execute(sql_query)
def fill_table(conn, cursor, csv_file_name) -> None:
"""Fill empty table with data from csv."""
sql = """COPY messages(title, date4, time3, author, counts,
sometext, category, date2) FROM STDIN DELIMITER ',' CSV HEADER;"""
cursor.copy_expert(sql, open(csv_file_name, "r"))
def check_table(conn, cursor):
"""See data in our table as df."""
df = pd.read_sql("SELECT * FROM messages", conn)
print(df.head(5))
print(len(df))
return df
def delete_duplicates(conn, cursor) -> None:
"""Delete duplicates in table."""
# Delete duplicates
sql_query = """DELETE FROM messages
WHERE id IN
(SELECT id
FROM
(SELECT id,
ROW_NUMBER() OVER( PARTITION BY title
ORDER BY id ) AS row_num
FROM messages ) t
WHERE t.row_num > 1 );"""
cursor.execute(sql_query)
def delete_counts_null(conn, cursor) -> None:
"""Delete duplicates in table."""
# Delete duplicates
sql_query = """DELETE FROM messages
WHERE counts IS NULL;"""
cursor.execute(sql_query)
def sort_by_dates(conn, cursor):
"""Sort data by date4 column."""
# to_char(date2, 'HH12:MI PM DD/MM/YYYY') newsdate,
sql_query = """SELECT * FROM messages
GROUP BY messages.id
ORDER BY messages.date4 DESC;"""
df = pd.read_sql(sql_query, conn)
print(df.head(5))
print(len(df))
return df
def drop_table(conn, cursor) -> None:
"""Drop a table if exists."""
sql_query = "DROP TABLE IF EXISTS messages;"
cursor.execute(sql_query)
def save_changes(conn, cursor) -> None:
"""Commit changes to DB."""
# Make the changes to the database persistent
conn.commit()
def close_conn(conn, cursor) -> None:
"""Close connection to DB."""
conn.close()
cursor.close()
def get_count(adres: str) -> List[int]:
# options = FirefoxOptions()
# options.add_argument("--headless")
# caps = DesiredCapabilities.FIREFOX.copy()
# caps['marionette'] = False
from selenium.webdriver.firefox.options import Options
options = Options()
options.headless = True
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
cap = DesiredCapabilities().FIREFOX
cap["marionette"] = True
options.binary = os.environ['FIREFOX_BIN']
driver = webdriver.Firefox(
capabilities=cap,
options=options,
executable_path=os.environ["GECKODRIVER_PATH"]
)
# driver = webdriver.Firefox(options=options)
driver.get(adres)
elems = driver.find_elements_by_xpath("//span[contains(@class, 'comments part')]")
# Without those two lines wouldn't recognize disqus count correctly
[i.get_attribute('a') for i in elems]
[i.text for i in elems]
list_counts = [int(i.text) if i.text != '' else 0 for i in elems]
# list_counts = [i for i in list_counts if i != 0]
# list_counts = list_counts[5:29]
# Filter out other counts from bottom and right corner of page
list_counts = list_counts[:-31]
driver.close()
return list_counts
def prep_dashboard(conn, cursor):
"""Get dates from DB; needed for DatePicker in Dashboard."""
df = sort_by_dates(conn, cursor)
df["date4"] = pd.to_datetime(df["date2"], format="%I:%M %p %d/%m/%Y")
last_date_string = df["date4"].dt.strftime("%d %B, %Y").tolist()[0]
first_date_string = df["date4"].dt.strftime("%d %B, %Y").tolist()[-1]
month_allowed = df["date4"].dt.strftime("%m-%Y").tolist()[0]
df["date4"] = df["date4"].dt.strftime("%Y-%m-%d")
first_date, last_date = df["date4"].min(), df["date4"].max()
df = check_table(conn, cursor)
df["date4"] = pd.to_datetime(df["date2"], format="%I:%M %p %d/%m/%Y")
df["date4"] = df["date4"].apply(lambda x: dt.datetime.strftime(x, "%Y-%m-%d %H:%M"))
df = df.sort_values("date4", ascending=False)
df = df.reset_index(drop=True)
return (
last_date_string,
first_date_string,
month_allowed,
first_date,
last_date,
df,
)
def get_dates(df) -> Tuple[str, str]:
"""Get last date and date today."""
# print(df.columns)
# print(df.dtypes)
today_date = datetime.now().strftime("%d-%m-%Y")
print(today_date)
df["date3"] = pd.to_datetime(df["date2"], format="%I:%M %p %d/%m/%Y")
# df["date3"] = pd.to_datetime(df["date2"], format="%I:%M %p %m/%d/%Y")
df["date3"] = df["date3"].dt.strftime("%d-%m-%Y")
last_date = df["date3"].tolist()[0]
# last_date = df["date4"].max()
print(last_date)
del df["date3"]
return last_date, today_date
def prep_list_dates(last_date: str, today_date: str) -> List[str]:
"""Prep list with dates."""
list2 = [last_date, today_date]
return list2
# list2 = ['18-03-2012', '21-03-2012'] # d-m-y
# list1 = [i for i in range(1, args["count"])]
def get_search_query(list2: List[str], t: int) -> str:
"""Create a query, that we will parse.
__Attributes__
list2: List, which contain start and end date.
t: if 1 change search query (look below)
__Returns__
search_query: List, which contain search query.
"""
if t > 1:
search_query = (
"https://itc.ua/page/"
+ str(t)
+ "/?s&after="
+ str(list2[0])
+ "&before="
+ str(list2[1])
)
else:
search_query = (
"https://itc.ua/?s&after=" + str(list2[0]) + "&before=" + str(list2[1])
)
return search_query
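# Hedged illustration of the URLs produced above (the dates reuse the d-m-Y strings shown below):
def _example_search_urls():
    dates = ['18-03-2012', '21-03-2012']
    first_page = get_search_query(dates, 1)
    # 'https://itc.ua/?s&after=18-03-2012&before=21-03-2012'
    third_page = get_search_query(dates, 3)
    # 'https://itc.ua/page/3/?s&after=18-03-2012&before=21-03-2012'
    return first_page, third_page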
def prep_numbers_2(count: int, list2: List[str]) -> List[str]:
"""Prep list to parse.
__Attributes__
list2: list with dates.
__Returns__
listadres: list with data to parse.
"""
# https://itc.ua/?s&after=10-01-2019&before=12-01-2019
# https://itc.ua/page/3/?s&after=10-01-2019&before=12-01-2019
# Create list which contains how many pages you want to scraping
numbers = list(range(0, count))
# Create an empty list for all of the page adresses
listadres = []
# Use for loop to fill list above with page adresses
for i in numbers:
if i == 1:
example = (
"https://itc.ua/?s&after=" + str(list2[0]) + "&before=" + str(list2[1])
)
else:
example = (
"https://itc.ua/page/"
+ str(i)
+ "/?s&after="
+ str(list2[0])
+ "&before="
+ str(list2[1])
)
listadres.append(example)
# Check output
print(listadres)
return listadres
def onepage(adres: str):
"""Take one query and get DataFrame.
__Attributes__
adres: one query.
__Returns__
df: DataFrame with parsed values.
"""
# Use headers to prevent hide our script
headers = {"User-Agent": "Mozilla/5.0"}
# Get page
page = requests.get(adres, headers=headers) # read_timeout=5
# Get all of the html code
soup = BeautifulSoup(page.content, "html.parser")
header = soup.find("header", class_="entry-header")
# header_text = header.get_text()
if header is not None:
print("That's all!")
return None
# Find title the topic
title = soup.find_all("h2", class_="entry-title")
# Find time when topic is published
time = soup.find_all("time", class_="published")
# Find time when topic is updated
timeup = soup.find_all("time", class_="screen-reader-text updated")
# Find author the topic
author = soup.find_all("a", class_="screen-reader-text fn")
# Find how many comments have topic
# counts = soup.find_all("span", class_="comments part")
listcounts = get_count(adres)
# counts = soup.find_all("a", class_="disqus-comment-count")
# Find preface for topic
sometext = soup.find_all("div", class_="entry-excerpt hidden-xs")
# Category
category = soup.find_all("span", class_="cat part text-uppercase")
# Create an empty lists
listtitle = []
listtime = []
listtimeup = []
listauthor = []
# listcounts = []
listsometext = []
listcategory = []
limit = min(
[
len(list(title)),
len(list(time)),
len(list(timeup)),
len(list(timeup)),
len(list(author)),
len(listcounts),
len(list(sometext)),
len(list(category)),
]
)
# Fill the lists above our scraping date
for i in range(0, limit):
k = title[i].get_text().replace("\n", "").replace("\t", "")
listtitle.append(k)
# listtitle = " ".join(title[i].get_text().split())
# listtitle = " ".join(title[i].get_text().split())
ll = time[i].get_text().replace("\n", "").replace("\t", "")
listtime.append(ll)
# listtime = " ".join(time[i].get_text().split())
m = timeup[i].get_text().replace("\n", "").replace("\t", "")
listtimeup.append(m)
# listtimeup = " ".join(timeup[i].get_text().split())
try:
n = author[i].get_text().replace("\n", "").replace("\t", "")
except IndexError:
n = ""
listauthor.append(n)
# n = catch_error(author[i].get_text())
# listauthor.append(n)
"""
o = counts[i].get_text().replace("\n", "").replace("\t", "")
listcounts.append(o)
"""
# listcounts = " ".join(counts[i].get_text().split())
try:
p = sometext[i].get_text().replace("\n", "").replace("\t", "")
except IndexError:
p = ""
listsometext.append(p)
c = category[i].get_text()
listcategory.append(c)
# listcategory = " ".join(category[i].get_text().split())
# Create DataFrame, that will contains info from lists
df = pd.DataFrame(
{
"title": listtitle,
"date4": listtime,
"time3": listtimeup,
"author": listauthor,
"counts": listcounts,
"sometext": listsometext,
"category": listcategory,
}
)
# Function will return that DataFrame
return df
def calc2(listadres: List[str]):
"""Take list and return df with parsed data.
__Attributes__
listadres: list with prepared adresses.
__Returns__
df: DataFrame with parsed values.
"""
# Create an empty DataFrame
df = pd.DataFrame()
# Adding each new page in one DataFrame
for c, v in enumerate(listadres):
if onepage(v) is None:
break
else:
t.sleep(1.5)
df = pd.concat([df, onepage(v)], ignore_index=True)
print("Parsed {} pages".format(c + 1))
listadres.append(get_search_query(list2, c + 1))
t.sleep(1.5)
return df
def get_one_csv(df):
"""Prepare DataFrame.
__Attributes__
df: DataFrame with parsed data.
__Returns__
df: DataFrame sorted by date abd without duplicates.
"""
# 3:19 PM 13/12/2018
# Change datetime format to what we want (example above)
df["date4"] = df["date4"].str.strip().str.replace(" в ", "/")
df["date2"] = pd.to_datetime(df["date4"], format="%d.%m.%Y/%H:%M")
df["date2"] = df["date2"].apply(
lambda x: dt.datetime.strftime(x, "%I:%M %p %d/%m/%Y")
)
df.drop_duplicates(subset="title", inplace=True)
df["date4"] = pd.to_datetime(df["date2"], format="%I:%M %p %d/%m/%Y")
df["date4"] = df["date4"].apply(lambda x: dt.datetime.strftime(x, "%Y-%m-%d %H:%M"))
df = df.sort_values("date4", ascending=False)
df = df.reset_index(drop=True)
return df
def remove_csv(names: List[str]) -> None:
"""Remove all csv."""
for i in names:
if os.path.exists(i):
os.remove(i)
print("Remove complete")
if __name__ == "__main__":
print("Begin")
start0 = t.time()
names = ["itctray41.csv", "itctray42.csv", "itctray43.csv"]
print("Connnect to DB")
conn, cursor = create_conn()
print("Create table if not exists in DB")
create_table(conn, cursor)
"""
conn, cursor = create_conn()
print("Create table if not exists in DB")
create_table(conn, cursor)
# print("Fill DB with data from csv")
fill_table(conn, cursor, "itctray.csv")
"""
print("Get old df from DB")
df1 = sort_by_dates(conn, cursor)
# df1 = df1.select_dtypes(include=['object']).applymap(lambda x: x.strip() if x else x)
df1.to_csv(names[0], index=False)
print("Get datetime now and last date from old df")
last_date, today_date = get_dates(df1)
# 27.05 15.03
print(last_date)
print(today_date)
# drop_table(conn, cursor)
# save_changes(conn, cursor)
# print("Close connection to DB")
# close_conn(conn, cursor)
"""
today_date = "27-05-2019"
last_date = "25-05-2019"
"""
print("Parse data")
list2 = prep_list_dates(last_date, today_date)
print("1")
listadres = prep_numbers_2(1, list2)
df2 = calc2(listadres)
# print(df.date2.tolist()[:10])
df2 = get_one_csv(df2)
# df = df.select_dtypes(include=['object']).applymap(lambda x: x.strip() if x else x)
old_columns = df2.columns.tolist()
df2['id'] = [i for i in range(len(df1), len(df1) + len(df2))]
new_columns = ['id'] + old_columns
df2 = df2[new_columns]
df2['date4'] = df2['date4'].apply(lambda x: x.split(" ")[0])
df2.to_csv(names[1], index=False)
df3 = | pd.concat([df2, df1]) | pandas.concat |
import pytest
import numpy as np
import pandas as pd
from ..linkages import sortLinkages
from ..linkages import calcDeltas
### Create test data set
linkage_ids = ["a", "b", "c"]
linkage_lengths = [4, 5, 6]
linkage_members_ids = []
for i, lid in enumerate(linkage_ids):
linkage_members_ids += [lid for j in range(linkage_lengths[i])]
obs_ids = [f"o{i:04d}" for i in range(len(linkage_members_ids))]
times = []
for i in range(len(linkage_ids)):
times += [np.arange(59000, 59000 + linkage_lengths[i])]
times = np.concatenate(times)
LINKAGES = pd.DataFrame({
"linkage_id" : linkage_ids
})
LINKAGE_MEMBERS = pd.DataFrame({
"linkage_id" : linkage_members_ids,
"obs_id" : obs_ids,
"mjd_utc" : times,
})
OBSERVATIONS = pd.DataFrame({
"obs_id" : obs_ids,
"mjd_utc" : times,
})
OBSERVATIONS.sort_values(
by=["mjd_utc", "obs_id"],
inplace=True,
ignore_index=True
)
def test_sortLinkages_timePresent():
# Scramble the linkages dataframe
len_linkages = len(LINKAGES)
scramble = np.random.choice(len_linkages, len_linkages, replace=False)
linkages_unsorted = LINKAGES.loc[scramble].reset_index(
drop=True
)
# Scramble the linkage_members dataframe
len_members = len(LINKAGE_MEMBERS)
scramble = np.random.choice(len_members, len_members, replace=False)
linkage_members_unsorted = LINKAGE_MEMBERS.loc[scramble].reset_index(
drop=True
)
# Sort scrambled linkages
linkages_sorted, linkage_members_sorted = sortLinkages(
linkages_unsorted,
linkage_members_unsorted,
OBSERVATIONS,
linkage_id_col="linkage_id"
)
# Make sure they returned dataframes match those created
pd.testing.assert_frame_equal(LINKAGES, linkages_sorted)
pd.testing.assert_frame_equal(LINKAGE_MEMBERS, linkage_members_sorted)
def test_sortLinkages_timeMissing():
# Scramble the linkages dataframe
len_linkages = len(LINKAGES)
scramble = np.random.choice(len_linkages, len_linkages, replace=False)
linkages_unsorted = LINKAGES.loc[scramble].reset_index(
drop=True
)
# Scramble the linkage_members dataframe
len_members = len(LINKAGE_MEMBERS)
scramble = np.random.choice(len_members, len_members, replace=False)
linkage_members_unsorted = LINKAGE_MEMBERS.loc[scramble].reset_index(
drop=True
)
linkage_members_unsorted.drop(
columns=["mjd_utc"],
inplace=True
)
# Sort scrambled linkages
linkages_sorted, linkage_members_sorted = sortLinkages(
linkages_unsorted,
linkage_members_unsorted,
OBSERVATIONS,
linkage_id_col="linkage_id"
)
# Make sure they returned dataframes match those created
pd.testing.assert_frame_equal(LINKAGES, linkages_sorted)
| pd.testing.assert_frame_equal(LINKAGE_MEMBERS[["linkage_id", "obs_id"]], linkage_members_sorted) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import sys
import json
import numpy as np
import pandas as pd
from pathlib import Path
import subprocess as subp
import traceback
from sklearn.model_selection import train_test_split, TimeSeriesSplit
import sklearn.metrics as skmet
from autogluon.tabular import TabularPredictor
from sklearn.metrics import accuracy_score
this_dir = Path(os.getcwd()).resolve()
bert_sort_root = this_dir.parent.parent
DATA_DIR = bert_sort_root / 'benchmarks'
test_info_path = bert_sort_root / 'automl' / 'task_spec.json'
output_dir = Path('output')
if not output_dir.exists():
output_dir.mkdir()
with open(test_info_path) as f:
test_info = json.load(f)
def drop_columns(dataset, project_key, ignore):
'''drop columns for ignored fields such as IDs'''
if ignore:
for c in ignore:
if c in dataset.columns:
print('dropping column %s' % (c))
dataset = dataset.drop(c, axis=1)
return dataset
def split(X, y, project_key, random_seed):
'''split data sets with 75%/25% ratios for training and testing, respectively'''
X_training, X_test, y_training, y_test = train_test_split(X, y, test_size=0.25, random_state=int(random_seed))
return X_training, X_test, y_training, y_test
train_time=5 #set maximum allowance time for each experiment (per data set per Method) in minutes
datasets = ['audiology',
'bank',
'car_eval',
'labor-relations',
'Nursery',
'Post-Operative',
'Pittsburgh_Bridges',
'cat-in-the-dat-ii'
#'Coil_1999',
#'uci-automobile',
]
file_extensions = [
'_Raw.csv',
'_bs__roberta.csv',
'_EncodedBERT.csv' ,
'_OrdinalEncoder.csv',
'_GroundTruth.csv'
]
seeds=['108',
'180',
'234',
'309',
# '533' used for 5 seeds
]
max_seeds=None
for seed in seeds[:max_seeds]:
for dataname in datasets:
for tail in file_extensions:
try:
print(f'Processing:{dataname}, Seed:{seed}, Encoded Method:{tail}')
# construct the path of benchmark
subfolder_name = dataname + '_' + str(seed) + '_m' + str(train_time) + tail
result_output_path = 'output/' + subfolder_name
Path(result_output_path).mkdir(exist_ok=True)
ignore = None
target_column_name = test_info[dataname]['target_feature']
#print('target_columns:', target_column_name)
target_dataset = test_info[dataname]['target_dataset']
task_type = test_info[dataname]['task']
if 'ignore' in test_info[dataname]:
ignore_column_name = test_info[dataname]['ignore']
if isinstance(ignore_column_name, list):
ignore_column_name = ','.join(ignore_column_name)
else:
ignore_column_name = None
filepath = DATA_DIR / (('%s' % (target_dataset)) + tail)
if ignore_column_name:
ignore = ignore_column_name.split(',')
if dataname == 'bank' and tail == '':
main_df = | pd.read_csv(filepath, sep=';', header='infer') | pandas.read_csv |
import matplotlib
matplotlib.use('Agg')
import pdb
import sys
import Pipelines as pl
import pandas as pd
from datetime import datetime
import numpy as np
import time
# saving the models for the iteration tests:
# to save the models for the iteration tests, we will save a dataframe (in the form of the final dataframe from Analyzer...) instead of a full model, because it is too computationally expensive, and as of this day, we are running out of room on QUEST.
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def main(data_folder, output_path, target_dataset, my_iterating_param, param_test_style, param_tests, n_trials):
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
default_params = {'data_folder':data_folder, 'file_path':target_dataset, 'td_window':4,'min_lag':0,'max_lag':2,'n_trees':500,'permutation_n':5, 'lag_method':'mean_mean', 'calc_mse':False, 'bootstrap_n':50,'n_trials':n_trials, 'run_time':current_time, 'sort_by': 'rank','iterating_param':my_iterating_param, 'filter_noisy':True, 'alpha': None}
overall_df = | pd.DataFrame() | pandas.DataFrame |
from configs import Level, LEVEL_MAP
from db.DBConnector import close_connection
from refactoring_statistics.plot_utils import box_plot_seaborn
from refactoring_statistics.query_utils import get_metrics_refactoring_level, get_metrics_refactorings, retrieve_columns
from utils.log import log_init, log_close, log
import time
import datetime
import pandas as pd
from pathlib import Path
from os import path
REFACTORING_SAMPLES = 50000
REFACTORING_LEVELS = [Level.Class, Level.Method, Level.Variable, Level.Field, Level.Other]
IMG_FORMAT = "svg"
DATASET = ""
# metrics
CLASS_METRICS_Fields = ["classCbo",
# "classLcom",
"classLCC",
"classTCC",
"classRfc",
"classWmc"]
CLASS_METRICS_REDUCED_Fields = ["classCbo",
"classTCC",
"classWmc"]
CLASS_LARGE_Fields = ["classLcom", "classLoc"]
CLASS_ATTRIBUTES_QTY_Fields = ["classUniqueWordsQty", "classNumberOfMethods", "classStringLiteralsQty", "classNumberOfPublicFields", "classVariablesQty"]
# plot all refactoring for each level
def metrics_refactorings_individual_levels(dataset, save_dir, yticks, metrics, title, file_descriptor):
# refactoring metrics per level
for level in REFACTORING_LEVELS:
fig_path_box = f"{save_dir}{file_descriptor}_{str(level)}_log_box_plot_{dataset}.{IMG_FORMAT}"
if not path.exists(fig_path_box):
refactoring_metrics_level = get_metrics_refactorings(level, dataset, LEVEL_MAP[level], metrics, REFACTORING_SAMPLES)
refactoring_metrics_level = pd.melt(refactoring_metrics_level, id_vars="refactoring", var_name="Metric", value_vars=metrics, value_name="values")
refactoring_metrics_level['refactoring'] = refactoring_metrics_level['refactoring'].astype('category')
refactoring_metrics_level['Metric'] = refactoring_metrics_level['Metric'].astype('category')
box_plot_seaborn(refactoring_metrics_level, f"{title} {str(level)}", fig_path_box, scale="log", yticks=yticks, hue="refactoring")
else:
log(f"--Skipped box plot at {fig_path_box}, because it already exists.")
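# Hedged illustration of the pd.melt reshaping used above: the wide per-metric columns become
# (refactoring, Metric, values) rows so seaborn can group by both. The refactoring names and
# values here are made up for the example.
def _example_melt():
    wide = pd.DataFrame({"refactoring": ["Extract Method", "Rename Class"],
                         "classCbo": [3, 7],
                         "classWmc": [12, 25]})
    return pd.melt(wide, id_vars="refactoring", var_name="Metric",
                   value_vars=["classCbo", "classWmc"], value_name="values")
    # -> four rows: one per (refactoring, metric) pair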
# plot the metrics for each refactoring level
def metrics_refactoring_levels(dataset, save_dir, yticks, metrics, title, file_descriptor):
fig_path_box = f"{save_dir}{file_descriptor}_log_box_plot_{dataset}.{IMG_FORMAT}"
if not path.exists(fig_path_box):
combined_refactoring_metrics = | pd.DataFrame() | pandas.DataFrame |
"""
Coding: UTF-8
Author: Randal
Time: 2021/2/20
E-mail: <EMAIL>
Description: This is a simple toolkit for data extraction of text.
The most important function in the script is about word frequency statistics.
Using re, I generalized the process in words counting, regardless of any preset
word segmentation. Besides, many interesting functions, like getting top sentences are built here.
All rights reserved.
"""
import xlwings as xw
import pandas as pd
import numpy as np
import os
import re
from alive_progress import alive_bar
from alive_progress import show_bars, show_spinners
import jieba
import datetime
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import math
class jieba_vectorizer(CountVectorizer):
def __init__(self, tf, userdict, stopwords, orient=False):
"""
:param tf: input sample DataFrame, {axis: 1, 0: id, 1: title, 2: body, 3: source, 4: freq}
:param stopwords: path to the stop-word list
:param userdict: path to the keyword list
:param orient: {True: the returned DTM only contains words from the keyword list, False: the DTM contains all words}
:return: a word-vector sample ready for direct use
"""
self.userdict = userdict
self.orient = orient
self.stopwords = stopwords
jieba.load_userdict(self.userdict) # load the keyword dictionary
tf = tf.copy() # avoid mutating the original frame outside this function
print('Tokenizing, please wait...')
rule = re.compile(u'[^\u4e00-\u9fa5]') # clean every sample, keeping only Chinese characters
for i in range(0, tf.shape[0]):
try:
tf.iloc[i, 2] = rule.sub('', tf.iloc[i, 2])
except TypeError:
print('Sample cleaning error: doc_id = ' + str(i))
continue
if self.stopwords is not None:
stopwords = txt_to_list(self.stopwords) # load the stop-word list
else:
stopwords = []
# start tokenizing
words = []
items = range(0, len(tf))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i, row in tf.iterrows():
item = row['正文']
result = jieba.cut(item)
# 同时过滤停用词
word = ''
for element in result:
if element not in stopwords:
if element != '\t':
word += element
word += " "
words.append(word)
bar()
# CountVectorizer() does the word-frequency counting automatically; fit_transform builds the text vectors and the vocabulary
# To switch to TfidfVectorizer, only the three lines below need to change
vect = CountVectorizer()
X = vect.fit_transform(words)
self.vectorizer = vect
matrix = X
X = X.toarray()
# A 2-D ndarray can be displayed in PyCharm, but it is fundamentally different from a DataFrame
# an ndarray has no index and no columns
features = vect.get_feature_names()
XX = pd.DataFrame(X, index=tf['id'], columns=features)
self.DTM0 = matrix
self.DTM = XX
self.features = features
# # The lines below were an earlier detour, kept only for reference
# words_bag = vect.vocabulary_
# # Inverting the dict (note: this only works for one-to-one key/value pairs; for one value mapping to many keys see setdefault)
# bag_words = dict((v, k) for k, v in words_bag.items())
#
# # The order of dict elements is not the same as the order of the dict's values
# lst = []
# for i in range(0, len(XX.columns)):
# lst.append(bag_words[i])
# XX.columns = lst
if orient:
dict_filter = txt_to_list(self.userdict)
for word in features:
if word not in dict_filter:
XX.drop([word], axis=1, inplace=True)
self.DTM_key = XX
def get_feature_names(self):
return self.features
def strip_non_keywords(self, df):
ff = df.copy()
dict_filter = txt_to_list(self.userdict)
for word in self.features:
if word not in dict_filter:
ff.drop([word], axis=1, inplace=True)
return ff
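# Hedged usage sketch (the file paths and the sample frame are hypothetical): the class expects
# a DataFrame whose columns follow {0: id, 1: title, 2: body, 3: source, 4: freq}.
def _example_vectorize(tf):
    vec = jieba_vectorizer(tf, userdict='keywords.txt', stopwords='stopwords.txt', orient=True)
    full_dtm = vec.DTM          # document-term matrix over the whole vocabulary
    keyword_dtm = vec.DTM_key   # restricted to the words in the user dictionary
    return full_dtm, keyword_dtm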
def make_doc_freq(word, doc):
"""
:param word: the keyword whose frequency is being counted
:param doc: the text to scan
:return: a dict recording how often the keyword appears in the text, together with its contexts
"""
# Match with a regular expression built from the keyword
# re.S lets the pattern match across line breaks
# finditer is the iterator version of findall; iterating over it yields the position of each match
it = re.finditer(word, doc, re.S)
# match.group() returns the matched substring, match.span() returns its indices
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
# collect the context results into a dict as well
context = dict()
for i in range(0, len(lst)):
# extend the span by at most 10 characters on each side to get the context
try:
# to pick an appropriate context window we need an upper and a lower bound for the index
# so compare span+10 with the document length, and span-10 with zero
# the upper bound takes the smaller of the pair, the lower bound takes the larger
MAX = min(lst[i][1] + 10, len(doc))
MIN = max(0, lst[i][0] - 10)
# grab the context
context[str(i)] = doc[MIN: MAX]
except IndexError:
print('IndexError: ' + word)
freq['Context'] = context
return freq
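# Hedged illustration of the structure returned by make_doc_freq (the toy Chinese text and
# keyword are made up here):
def _example_doc_freq():
    freq = make_doc_freq('监管', '加强监管。落实监管责任。')
    # freq['Frequency']     -> 2
    # freq['Context']['0']  -> up to 10 characters on each side of the first match
    return freq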
def make_info_freq(name, pattern, doc):
"""
:param name: the label of the pattern being counted
:param pattern: the regular expression used for counting
:param doc: the text to scan
:return: a dict recording how often the pattern appears in the text, together with its contexts
Note: the context entries in the returned dict are tuples of (matched keyword, context)
"""
# 使用正则表达式进行匹配, 拼接成pattern
# re.S表示会自动换行
# finditer是findall的迭代器版本,通过遍历可以依次打印出子串所在的位置
it = re.finditer(pattern[0], doc, re.S)
# match.group()可以返回子串,match.span()可以返回索引
cls = pattern[1]
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
freq['Name'] = name
# 将上下文结果也整理为一个字典
context = dict()
for i in range(0, len(lst)):
# 将span的范围前后各扩展不多于10个字符,得到上下文
try:
# 为了划出适宜的前后文范围,需要设定索引的最大值和最小值
# 因此要比较span+10和doc极大值,span-10和doc极小值
# 最大值在两者间取小,最小值在两者间取大
MAX = min(lst[i][1] + 10, len(doc))
MIN = max(0, lst[i][0] - 10)
# 取得匹配到的关键词,并做掐头去尾处理
word = match_cut(doc[lst[i][0]: lst[i][1]], cls)
# 将关键词和上下文打包,存储到 context 条目中
context[str(i)] = (word, doc[MIN: MAX])
except IndexError:
print('IndexError: ' + name)
freq['Context'] = context
return freq
def make_docs_freq(word, docs):
"""
:param word: the keyword whose frequency is being counted
:param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0), a body column (iloc: 2) and a reserved frequency column (iloc: 4)
:return: a dict holding the per-document "single keyword - single text" frequency dicts, plus the aggregated counts
"""
freq = dict()
# 因为总频数是通过"+="的方式计算,不是简单赋值,所以要预设为0
freq['Total Frequency'] = 0
docs = docs.copy() # 防止对函数之外的原样本框造成改动
for i in range(0, len(docs)):
# 对于每个文档,都形成一个字典,字典包括关键词在该文档出现的频数和上下文
# id需要在第0列,正文需要在第2列
freq['Doc' + str(docs.iloc[i, 0])] = make_doc_freq(word, docs.iloc[i, 2])
# 在给每个文档形成字典的同时,对于总概率进行滚动加总
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
# 接下来建立一个DFC(doc-freq-context)统计面板,汇总所有文档对应的词频数和上下文
# 首先构建(id, freq)的字典映射
xs = docs['id']
ys = docs['freq']
# zip(迭代器)是一个很好用的方法,建议多用
id_freq = {x: y for x, y in zip(xs, ys)}
# 新建一个空壳DataFrame,接下来把数据一条一条粘贴进去
data = pd.DataFrame(columns=['id', 'freq', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
context = doc['Context']
for i in range(0, num):
strip = {'id': item, 'freq': id_freq[item], 'word': word, 'num': i, 'context': context[str(i)]}
# 默认orient参数等于columns
# 如果字典的值是标量,那就必须传递一个index,这是规定
strip = pd.DataFrame(strip, index=[None])
# df的append方法只能通过重新赋值来进行修改
data = data.append(strip)
data.set_index(['id', 'freq', 'word'], drop=True, inplace=True)
freq['DFC'] = data
return freq
def make_infos_freq(name, pattern, docs):
"""
:param name: the label of the pattern being counted
:param pattern: a tuple of (regular expression, trimming method) used for counting
:param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0) and a body column (iloc: 2)
:return: a dict holding the per-document "single keyword - single text" frequency dicts, plus the aggregated counts
"""
freq = dict()
# 因为总频数是通过"+="的方式计算,不是简单赋值,所以要预设为0
freq['Total Frequency'] = 0
docs = docs.copy() # 防止对函数之外的原样本框造成改动
items = range(0, len(docs))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i in items:
# 对于每个文档,都形成一个字典,字典包括关键词在该文档出现的频数和上下文
# id需要在第0列,正文需要在第2列
# pattern 要全须全尾地传递进去,因为make_info_freq两个参数都要用
freq['Doc' + str(docs.iloc[i, 0])] = make_info_freq(name, pattern, docs.iloc[i, 2])
# 在给每个文档形成字典的同时,对于总概率进行滚动加总
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
bar()
# 接下来建立一个DFC(doc-freq-context)统计面板,汇总所有文档对应的词频数和上下文
# 首先构建(id, freq)的字典映射
xs = docs['id']
ys = docs['freq']
# zip(迭代器)是一个很好用的方法,建议多用
id_freq = {x: y for x, y in zip(xs, ys)}
# 新建一个空壳DataFrame,接下来把数据一条一条粘贴进去
data = pd.DataFrame(columns=['id', 'freq', 'form', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
# 从(关键词,上下文)中取出两个元素
context = doc['Context']
for i in range(0, num):
# context 中的关键词已经 match_cut 完毕,不需要重复处理
strip = {'id': item, 'form': name, 'freq': id_freq[item], 'word': context[str(i)][0],
'num': i, 'context': context[str(i)][1]}
# 默认orient参数等于columns
# 如果字典的值是标量,那就必须传递一个index,这是规定
strip = pd.DataFrame(strip, index=[None])
# df的append方法只能通过重新赋值来进行修改
data = data.append(strip)
data.set_index(['id', 'freq', 'form', 'word'], drop=True, inplace=True)
freq['DFC'] = data
print(name + ' Completed')
return freq
def words_docs_freq(words, docs):
"""
:param words: the list of keywords whose frequencies are being counted
:param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column, a body column and a frequency column
:return: a dict holding the "single keyword - multiple texts" frequency dicts, plus the final DFC (doc-frequency-context) and DTM (doc-term matrix)
"""
freqs = dict()
# 与此同时新建一个空壳DataFrame,用于汇总DFC
data = pd.DataFrame()
# 新建一个空壳,用于汇总DTM(Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=words, index=docs['id'])
# 来吧,一个循环搞定所有
items = range(len(words))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for word in words:
freq = make_docs_freq(word, docs)
freqs[word] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, word] = freq['Doc' + str(item)]['Frequency']
bar()
# 记得要sort一下,不然排序的方式不对(应该按照doc id来排列)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
def infos_docs_freq(infos, docs):
"""
:param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column and a body column
:param infos: a dict of regular expressions, where the key is an example such as "(1)" and the value is the pattern such as "([0-9])"
:return: a dict holding the "single keyword - multiple texts" frequency dicts, plus the final DFC (doc-frequency-context) and DTM (doc-term matrix)
"""
freqs = dict()
# 与此同时新建一个空壳DataFrame,用于汇总DFC
data = pd.DataFrame()
# 新建一个空壳,用于汇总DTM(Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=list(infos.keys()), index=docs['id'])
# 来吧,一个循环搞定所有
items = range(len(infos))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for k, v in infos.items():
freq = make_infos_freq(k, v, docs)
freqs[k] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, k] = freq['Doc' + str(item)]['Frequency']
bar()
# 记得要sort一下,不然排序的方式不对(应该按照doc id来排列)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
def massive_pop(infos, doc):
"""
:param infos: list of regular expressions matching the content to delete
:param doc: the body text
:return: the text with all matches removed
"""
for info in infos:
doc = re.sub(info, '', doc)
return doc
def massive_sub(infos, doc):
"""
:param infos: dict of regular expressions and their replacement strings
:param doc: the body text
:return: the text with all substitutions applied
"""
for v, k in infos:
doc = re.sub(v, k, doc)
return doc
# Next, take the first n sentences of each sample (or everything, if there are fewer than n) and run the comparison again
# The first-ten-sentences rule works by counting sentence-ending punctuation such as !?。 until ten are seen
def top_n_sent(n, doc, percentile=1):
"""
:param n: the number of sentences; the function returns the first n sentences of a text, or the whole text if it has fewer than n sentences
:param doc: the body text
:param percentile: the quantile to use when selecting sentences by proportion; e.g. with ten sentences in total, the 50% percentile gives 5 sentences
with eleven sentences, math.ceil rounds up, so 6 sentences are returned
:return: a string containing the first n sentences
"""
info = '[。?!]'
# Inside this function body, the scope of the main statements is wider than the loop body, so loop variables behave like locals
# returning directly outside the loop would therefore raise a NameError; a global declaration could work around that
# but that is not recommended, because a same-named variable outside the function would get clobbered
# the better habit is to lean on iterators: wrapping the loop in an iterator solves many of these problems
# and ready-made iterators such as re.finditer do not need to be re-written, and are convenient to call
# below, the first code line wraps the iterator in a list, producing a list of match objects
# each match object carries its own attributes
re_iter = list(re.finditer(info, doc))
# max_iter is the total number of matches found by re
max_iter = len(re_iter)
# this branch means the text is too short, or has no punctuation, so the whole text is returned
if max_iter == 0:
return doc
# handle the percentile case: with e.g. 11 sentences in total, discard the original n and use the sentence count implied by the percentile
# note the rounding (math.ceil rounds up)
if percentile != 1:
n = math.ceil(percentile * max_iter)
# if at least one sentence is matched, the scan ends naturally and the result is returned
if n > 0:
return doc[0: re_iter[n - 1].end()]
# if the text is too short, or the percentile too low, to make up even one sentence, return the first sentence
elif n == 0:
return doc[0: re_iter[0].end()]
# if more than n sentences were matched, keep only the first n
if max_iter >= n:
return doc[0: re_iter[n - 1].end()]
# if fewer than n sentences were matched, return the whole text
elif 0 < max_iter < n:
return doc[0: re_iter[-1].end()]
# To reduce the chance of name clashes, keep the number of variables inside function bodies small
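# Hedged usage sketch for top_n_sent (the toy sentences are made up here):
def _example_top_sentences():
    text = '第一句。第二句!第三句?第四句。'
    first_two = top_n_sent(2, text)                   # -> '第一句。第二句!'
    first_half = top_n_sent(2, text, percentile=0.5)  # ceil(0.5 * 4) = 2 sentences
    return first_two, first_half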
def dtm_sort_filter(dtm, keymap, name=None):
"""
:param dtm: the word-frequency matrix built earlier (Doc-Term-Matrix)
:param keymap: dict mapping each category to its list of keywords
:param name: name of the Excel file to produce (including the extension)
:return: a dict of two pandas.DataFrames: a binary table showing whether each category is present, and the final per-document category counts
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
dtm_class = pd.DataFrame.from_dict(strips, orient='index')
dtm_class = dtm_class.applymap(lambda x: 1 if x != 0 else 0)
dtm_final = dtm_class.agg(np.sum, axis=1)
result = {'DTM_class': dtm_class, 'DTM_final': dtm_final}
return result
def dtm_point_giver(dtm, keymap, scoremap, name=None):
"""
:param dtm: the word-frequency matrix built earlier (Doc-Term-Matrix)
:param keymap: dict of the form {TypeA: [word1, word2, word3, ...], TypeB: ...}
:param scoremap: dict mapping each category to a score
:param name: name of the Excel file to produce (including the extension)
:return: a pandas.DataFrame with two columns: the document id and the document's score (the highest score among all matched keywords)
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
# 非 keymap 中词会被过滤掉
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
dtm_class = pd.DataFrame.from_dict(strips, orient='index')
dtm_class = dtm_class.applymap(lambda x: 1 if x != 0 else 0)
# 找到 columns 对应的分值
keywords = list(dtm_class.columns)
multiplier = []
for keyword in keywords:
multiplier.append(scoremap[keyword])
# DataFrame 的乘法运算,不会改变其 index 和 columns
dtm_score = dtm_class.mul(multiplier, axis=1)
    # Take the maximum value as the score
dtm_score = dtm_score.agg(np.max, axis=1)
return dtm_score
def dfc_sort_filter(dfc, keymap, name=None):
"""
:param dfc: 前面生成的词频统计明细表:Doc-Frequency-Context
:param keymap: 字典,标明了 关键词-所属种类 两者关系
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个 pandas.DataFrame,表格有两列,一列是文本id,一列是文本中所包含的业务种类数
"""
    # Pull the keywords out of the dfc MultiIndex (the index is essentially an ndarray);
    # once extracted, the keywords can be mapped through the keymap dictionary.
    # First insert a cls-id column to hold the mapping result.
    dfc.insert(0, 'cls-id', None)
    # Iterate over the rows
for i in range(0, len(dfc.index)):
dfc.iloc[i, 0] = keymap[dfc.index[i][2]]
    # In theory Excel's grouped counting could show the number of business categories directly,
    # but Excel can only count all values, not distinct categories, so pandas' unique() is still needed.
    # dfc.to_excel('被监管业务统计.xlsx')
    # Count per index value; the object obtained via loc is a DataFrame.
    # First collect the list of doc ids.
did = []
for item in dfc.index.unique():
did.append(item[0])
did = list(pd.Series(did).unique())
    # Next compute the result for each document; note how values are taken from the MultiIndex
uni = {}
for item in did:
uni[item] = len(dfc.loc[item, :, :]['cls-id'].unique())
    # Convert the resulting dict into a DataFrame indexed by its keys
uni = pd.DataFrame.from_dict(uni, orient='index')
uni.fillna(0, axis=1, inplace=True)
# uni.to_excel(name)
return uni
def dfc_point_giver(dfc, keymap, name=None):
"""
:param dfc: 前面生成的词频统计明细表:Doc-Frequency-Context
:param keymap: 字典,标明了 关键词-分值 两者关系
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个 pandas.DataFrame,表格有两列,一列是文本id,一列是文本的分值(所有关键词的分值取最高)
"""
dfc.insert(0, 'point', None)
    # Iterate over the rows
for i in range(0, len(dfc.index)):
dfc.iloc[i, 0] = keymap[dfc.index[i][2]]
    # Count per index value; the object obtained via loc is a DataFrame.
    # First collect the list of doc ids.
did = []
for item in dfc.index.unique():
did.append(item[0])
did = list(pd.Series(did).unique())
    # Next compute the result for each document; note how values are taken from the MultiIndex
uni = {}
for item in did:
uni[item] = max(dfc.loc[item, :, :]['point'].unique())
    # Convert the resulting dict into a DataFrame indexed by its keys
uni = pd.DataFrame.from_dict(uni, orient='index')
uni.fillna(0, axis=1, inplace=True)
# uni.to_excel(name)
return uni
def dfc_sort_counter(dfc, name=None):
"""
:param dfc: 前面生成的词频统计明细表:Doc-Frequency-Context
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个 pandas.DataFrame,表格有两列,一列是文本id,一列是文本中所包含的业务种类数
"""
    # Count per index value; the object obtained via loc is a DataFrame
dfc.insert(0, 'form', None)
for i in range(0, dfc.shape[0]):
dfc.iloc[i, 0] = dfc.index[i][2]
    # First collect the list of doc ids
did = []
for item in dfc.index.unique():
did.append(item[0])
did = list(pd.Series(did).unique())
    # Next compute the result for each document; note how values are taken from the MultiIndex
uni = {}
for item in did:
uni[item] = len(dfc.loc[item, :, :, :]['form'].unique())
    # Convert the resulting dict into a DataFrame indexed by its keys
uni = | pd.DataFrame.from_dict(uni, orient='index') | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""System transmission plots.
This code creates transmission line and interface plots.
@author: <NAME>, <NAME>
"""
import os
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.dates as mdates
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, InputSheetError, MissingMetaData, UnsupportedAggregation, MissingZoneData)
class MPlot(PlotDataHelper):
"""transmission MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The transmission.py module contains methods that are
related to the transmission network.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.font_defaults = mconfig.parser("font_settings")
def line_util(self, **kwargs):
"""Creates a timeseries line plot of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the y-axis.
The plot will default to showing the 10 highest utilized lines. A Line category
can also be passed instead, using the property field in the Marmot_plot_select.csv
        Each scenario is plotted on a separate Facet plot.
        This method calls _util() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(**kwargs)
return outputs
def line_hist(self, **kwargs):
"""Creates a histogram of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the x-axis, with # lines on the y-axis.
Each bar is equal to a 0.05 utilization rate
The plot will default to showing all lines. A Line category can also be passed
instead using the property field in the Marmot_plot_select.csv
        Each scenario is plotted on a separate Facet plot.
        This method calls _util() and passes the hist=True argument to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(hist=True, **kwargs)
return outputs
def _util(self, hist: bool = False, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates utilization plots, line plot and histograms
        This method is called from line_util() and line_hist()
Args:
hist (bool, optional): If True creates a histogram of utilization.
Defaults to False.
prop (str, optional): Optional PLEXOS line category to display.
Defaults to None.
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(facet=True,
multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
for zone_input in self.Zones:
self.logger.info(f"For all lines touching Zone = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.25)
data_table=[]
for n, scenario in enumerate(self.Scenarios):
self.logger.info(f"Scenario = {str(scenario)}")
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.warning("Column to Aggregate by is missing")
continue
try:
zone_lines = zone_lines.xs(zone_input)
zone_lines=zone_lines['line_name'].unique()
except KeyError:
self.logger.warning('No data to plot for scenario')
outputs[zone_input] = MissingZoneData()
continue
flow = self["line_Flow"].get(scenario).copy()
#Limit to only lines touching to this zone
flow = flow[flow.index.get_level_values('line_name').isin(zone_lines)]
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
limits = self["line_Import_Limit"].get(scenario).copy()
limits = limits.droplevel('timestamp').drop_duplicates()
limits.mask(limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
                # Check whether a line category (prop) was passed; if not, skip the category filter.
if pd.notna(prop):
self.logger.info(f"Line category = {str(prop)}")
line_relations = self.meta.lines(scenario).rename(columns={"name":"line_name"}).set_index(["line_name"])
flow=pd.merge(flow,line_relations, left_index=True,
right_index=True)
flow=flow[flow["category"] == prop]
flow=flow.drop('category',axis=1)
flow = pd.merge(flow,limits[0].abs(),on = 'line_name',how='left')
flow['Util']=(flow['0_x'].abs()/flow['0_y']).fillna(0)
#If greater than 1 because exceeds flow limit, report as 1
flow['Util'][flow['Util'] > 1] = 1
annual_util=flow['Util'].groupby(["line_name"]).mean().rename(scenario)
# top annual utilized lines
top_utilization = annual_util.nlargest(10, keep='first')
color_dict = dict(zip(self.Scenarios,self.color_list))
if hist == True:
mplt.histogram(annual_util, color_dict,label=scenario, sub_pos=n)
else:
for line in top_utilization.index.get_level_values(level='line_name').unique():
duration_curve = flow.loc[line].sort_values(by='Util',
ascending=False).reset_index(drop=True)
mplt.lineplot(duration_curve, 'Util' ,label=line, sub_pos=n)
axs[n].set_ylim((0,1.1))
data_table.append(annual_util)
mplt.add_legend()
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
# add facet labels
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
if hist == True:
                if pd.isna(prop):
                    prop_name = 'All Lines'
                else:
                    prop_name = prop
plt.ylabel('Number of lines', color='black',
rotation='vertical', labelpad=30)
plt.xlabel(f'Line Utilization: {prop_name}', color='black',
rotation='horizontal', labelpad=30)
else:
                if pd.isna(prop):
                    prop_name = 'Top 10 Lines'
                else:
                    prop_name = prop
plt.ylabel(f'Line Utilization: {prop_name}', color='black',
rotation='vertical', labelpad=60)
plt.xlabel('Intervals', color='black',
rotation='horizontal', labelpad=20)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
try:
del annual_util,
except:
continue
Data_Out = pd.concat(data_table)
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
def int_flow_ind(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of interchange flows and their import and export limits.
        Each interchange is plotted on a separate facet plot.
The plot includes every interchange that originates or ends in the aggregation zone.
This can be adjusted by passing a comma separated string of interchanges to the property input.
        The code will create either a timeseries or a duration curve depending on
        whether the word 'duration_curve' appears in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
            start_date_range (str, optional): Defines a start date from which to represent data.
                Defaults to None.
            end_date_range (str, optional): Defines an end date up to which to represent data.
                Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for zone_input in self.Zones:
self.logger.info(f"For all interfaces touching Zone = {zone_input}")
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).copy().droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns=['interface_category', 'units'], inplace=True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).copy().droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns=['interface_category', 'units'], inplace=True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if pd.notna(prop):
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf, level='interface_name') / 1000
single_int.index = single_int.index.droplevel(['interface_category','units'])
single_int.columns = [interf]
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
if | pd.notna(start_date_range) | pandas.notna |
import pandas as pd
import numpy as np
from copy import deepcopy
from rdkit import Chem
from data import *
from sklearn.externals import joblib
from sklearn.manifold import TSNE
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plot
def can_smile(smi_list):
can_list = []
for item in smi_list:
if Chem.MolFromSmiles(item) is not None:
can_item = Chem.MolToSmiles(Chem.MolFromSmiles(item))
can_list.append(can_item)
return can_list
def drop_internal_overlap(fname):
"""
Drop internal overlap within generated SMILES file
Args:
fname: path to the file containing SMILES separated by \n
Returns: None. Save output file.
"""
smi_list = []
    with open(fname) as f:
for line in f:
smi_list.append(line.rstrip())
can_smi_list = can_smile(smi_list)
unique_lst = set(can_smi_list)
unique_lst = list(unique_lst)
outf = pd.DataFrame()
outf['Cano_SMILES'] = pd.Series(data=unique_lst)
outf.to_csv('Unique_'+fname, index=False)
def get_smi_list_overlap(large, small):
"""
Args:
        large: list containing the SMILES structures used for transfer training
        small: list containing the SMILES structures obtained from transfer sampling
Returns: num of repeat SMILES, num of unique SMILES in transfer sampling, list of unique SMILES
"""
large_can, small_can = can_smile(large), can_smile(small)
small_copy = deepcopy(small_can)
overlap = set(small_can).intersection(large_can)
#for item in overlap:
#list(filter(lambda a: a != item, small_copy))
small_copy_novel = [item for item in small_copy if item not in large_can]
return len(overlap), len(small_copy), small_copy_novel
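# Hypothetical usage sketch (toy SMILES, not part of the original script), assuming RDKit's
# default canonicalization: ethanol appears in both lists, while benzene is novel to the sample.
def _demo_get_smi_list_overlap():
    training = ['CCO', 'CCN']        # ethanol, ethylamine
    sampled = ['OCC', 'c1ccccc1']    # ethanol written differently, benzene
    return get_smi_list_overlap(training, sampled)  # -> (1, 2, ['c1ccccc1'])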
def predict_property(model_file, fps):
"""
Function to predict the properties of generated molecules
Args:
model_file: File containing pre-trained ML model for prediction
fps: list of molecular fingerprints
Returns: list of predicted valued
"""
model = joblib.load(model_file)
return model.predict(fps)
def save_predict_results():
"""
Predict the gap and dip of generated SMILES from files and save the results
    Also save the generated molecules with gap < 2 and dip < 3.66 as promising candidates
Returns:
"""
ori_lst = []
for i in range(1, 4):
ori_df = pd.read_csv('Training_Model'+str(i)+'.csv')
ori_list = ori_df['SMILES'].tolist()
ori_lst.append(ori_list)
frames = []
gen_mols = []
gen_fps = []
for i, group in enumerate(['all', 'class3', 'prom']):
gen_df = pd.read_csv('novel_sampled_cano_script_'+group+'_until.csv')
gen_list = gen_df['SMILES'].tolist()
print('Number of molecules in training for model {} is {}'.format(i+1, len(ori_lst[i])))
over, num, smi_list = get_smi_list_overlap(ori_lst[i], gen_list)
smi_mols = get_mols(smi_list)
smi_fps, failed_mols = get_fingerprints(smi_mols)
for idx in sorted(failed_mols, reverse=True):
del smi_list[idx]
smi_df = | pd.Series(data=smi_list, name='SMILES') | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPyLib: Quantitative Trading Python Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import sys
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.core.base import PandasObject
# =============================================
# check min, python version
from .supertrend import SuperTrend
# =============================================
warnings.simplefilter(action="ignore", category=RuntimeWarning)
# =============================================
def numpy_rolling_window(data, window):
shape = data.shape[:-1] + (data.shape[-1] - window + 1, window)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
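# Illustrative sketch (not part of the original library): numpy_rolling_window returns a strided
# 2-D view of overlapping windows, so reductions along the last axis become rolling statistics.
def _demo_numpy_rolling_window():
    data = np.array([1., 2., 3., 4., 5.])
    windows = numpy_rolling_window(data, 3)   # [[1, 2, 3], [2, 3, 4], [3, 4, 5]]
    return np.mean(windows, axis=-1)          # rolling mean: [2., 3., 4.]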
def numpy_rolling_series(func):
def func_wrapper(data, window, as_source=False):
series = data.values if isinstance(data, pd.Series) else data
new_series = np.empty(len(series)) * np.nan
calculated = func(series, window)
new_series[-len(calculated):] = calculated
if as_source and isinstance(data, pd.Series):
return pd.Series(index=data.index, data=new_series)
return new_series
return func_wrapper
@numpy_rolling_series
def numpy_rolling_mean(data, window, as_source=False):
return np.mean(numpy_rolling_window(data, window), axis=-1)
@numpy_rolling_series
def numpy_rolling_std(data, window, as_source=False):
return np.std(numpy_rolling_window(data, window), axis=-1, ddof=1)
# ---------------------------------------------
def session(df, start='17:00', end='16:00'):
""" remove previous globex day from df """
if df.empty:
return df
# get start/end/now as decimals
int_start = list(map(int, start.split(':')))
int_start = (int_start[0] + int_start[1] - 1 / 100) - 0.0001
int_end = list(map(int, end.split(':')))
int_end = int_end[0] + int_end[1] / 100
int_now = (df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100)
    # same-day session?
is_same_day = int_end > int_start
# set pointers
curr = prev = df[-1:].index[0].strftime('%Y-%m-%d')
# globex/forex session
if not is_same_day:
prev = (datetime.strptime(curr, '%Y-%m-%d') -
timedelta(1)).strftime('%Y-%m-%d')
# slice
if int_now >= int_start:
df = df[df.index >= curr + ' ' + start]
else:
df = df[df.index >= prev + ' ' + start]
return df.copy()
# ---------------------------------------------
def heikinashi(bars):
bars = bars.copy()
bars['ha_close'] = (bars['open'] + bars['high'] +
bars['low'] + bars['close']) / 4
# ha open
bars.at[0, 'ha_open'] = (bars.at[0, 'open'] + bars.at[0, 'close']) / 2
for i in range(1, len(bars)):
bars.at[i, 'ha_open'] = (bars.at[i - 1, 'ha_open'] + bars.at[i - 1, 'ha_close']) / 2
bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1)
bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1)
return pd.DataFrame(index=bars.index,
data={'open': bars['ha_open'],
'high': bars['ha_high'],
'low': bars['ha_low'],
'close': bars['ha_close']})
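# Illustrative sketch (tiny made-up OHLC frame, not part of the original library): heikin-ashi
# bars smooth raw candles; the first ha_open is the mean of the first open and close.
def _demo_heikinashi():
    bars = pd.DataFrame({'open': [10.0, 11.0], 'high': [12.0, 12.5],
                         'low': [9.5, 10.5], 'close': [11.0, 12.0]})
    return heikinashi(bars)  # first row: open = (10 + 11) / 2 = 10.5, close = (10 + 12 + 9.5 + 11) / 4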
# ---------------------------------------------
def tdi(series, rsi_lookback=13, rsi_smooth_len=2,
rsi_signal_len=7, bb_lookback=34, bb_std=1.6185):
rsi_data = rsi(series, rsi_lookback)
rsi_smooth = sma(rsi_data, rsi_smooth_len)
rsi_signal = sma(rsi_data, rsi_signal_len)
bb_series = bollinger_bands(rsi_data, bb_lookback, bb_std)
return pd.DataFrame(index=series.index, data={
"rsi": rsi_data,
"rsi_signal": rsi_signal,
"rsi_smooth": rsi_smooth,
"rsi_bb_upper": bb_series['upper'],
"rsi_bb_lower": bb_series['lower'],
"rsi_bb_mid": bb_series['mid']
})
# ---------------------------------------------
def awesome_oscillator(df, weighted=False, fast=5, slow=34):
midprice = (df['high'] + df['low']) / 2
if weighted:
ao = (midprice.ewm(fast).mean() - midprice.ewm(slow).mean()).values
else:
ao = numpy_rolling_mean(midprice, fast) - \
numpy_rolling_mean(midprice, slow)
return pd.Series(index=df.index, data=ao)
# ---------------------------------------------
def nans(length=1):
mtx = np.empty(length)
mtx[:] = np.nan
return mtx
# ---------------------------------------------
def typical_price(bars):
res = (bars['high'] + bars['low'] + bars['close']) / 3.
return pd.Series(index=bars.index, data=res)
# ---------------------------------------------
def mid_price(bars):
res = (bars['high'] + bars['low']) / 2.
return pd.Series(index=bars.index, data=res)
# ---------------------------------------------
def ibs(bars):
""" Internal bar strength """
res = np.round((bars['close'] - bars['low']) /
(bars['high'] - bars['low']), 2)
return pd.Series(index=bars.index, data=res)
# ---------------------------------------------
def true_range(bars):
return pd.DataFrame({
"hl": bars['high'] - bars['low'],
"hc": abs(bars['high'] - bars['close'].shift(1)),
"lc": abs(bars['low'] - bars['close'].shift(1))
}).max(axis=1)
# ---------------------------------------------
def atr(bars, window=14, exp=False):
tr = true_range(bars)
if exp:
res = rolling_weighted_mean(tr, window)
else:
res = rolling_mean(tr, window)
return pd.Series(res)
# ---------------------------------------------
def crossed(series1, series2, direction=None):
if isinstance(series1, np.ndarray):
series1 = pd.Series(series1)
if isinstance(series2, (float, int, np.ndarray)):
series2 = pd.Series(index=series1.index, data=series2)
if direction is None or direction == "above":
above = pd.Series((series1 > series2) & (
series1.shift(1) <= series2.shift(1)))
if direction is None or direction == "below":
below = pd.Series((series1 < series2) & (
series1.shift(1) >= series2.shift(1)))
if direction is None:
        return above | below
return above if direction == "above" else below
def crossed_above(series1, series2):
return crossed(series1, series2, "above")
def crossed_below(series1, series2):
return crossed(series1, series2, "below")
# ---------------------------------------------
def rolling_std(series, window=200, min_periods=None):
min_periods = window if min_periods is None else min_periods
if min_periods == window and len(series) > window:
return numpy_rolling_std(series, window, True)
else:
try:
return series.rolling(window=window, min_periods=min_periods).std()
except Exception as e:
return pd.Series(series).rolling(window=window, min_periods=min_periods).std()
# ---------------------------------------------
def rolling_mean(series, window=200, min_periods=None):
min_periods = window if min_periods is None else min_periods
if min_periods == window and len(series) > window:
return numpy_rolling_mean(series, window, True)
else:
try:
return series.rolling(window=window, min_periods=min_periods).mean()
except Exception as e:
return pd.Series(series).rolling(window=window, min_periods=min_periods).mean()
# ---------------------------------------------
def rolling_min(series, window=14, min_periods=None):
min_periods = window if min_periods is None else min_periods
try:
return series.rolling(window=window, min_periods=min_periods).min()
except Exception as e:
return pd.Series(series).rolling(window=window, min_periods=min_periods).min()
# ---------------------------------------------
def rolling_max(series, window=14, min_periods=None):
min_periods = window if min_periods is None else min_periods
try:
        return series.rolling(window=window, min_periods=min_periods).max()
    except Exception as e:
        return pd.Series(series).rolling(window=window, min_periods=min_periods).max()
# ---------------------------------------------
def rolling_weighted_mean(series, window=200, min_periods=None):
min_periods = window if min_periods is None else min_periods
try:
return series.ewm(span=window, min_periods=min_periods).mean()
except Exception as e:
return | pd.ewma(series, span=window, min_periods=min_periods) | pandas.ewma |
from Bio import SeqIO
from src.inputValueException import InputValueException
import os
import pandas as pd
import re
# calculates kmer frequencies
# k: kmer-length
# peak: peak-position, where sequences should be aligned
# selected: input files
# no_sec_peak: status (-1= no structural data available, 0= False, 1= True) if only kmer
# with peak position should be saved
def calcFrequency(k, peak, selected, no_sec_peak):
profile1 = dict() # for file1
if no_sec_peak == -1:
profile2 = dict() # for file2
else: # if structural data is available, use profile2 as list
# therefore method is called twice
profile2 = [] # saves alphabet of structural fasta-files
kmer = ''
for file in selected: # selects data
if file == selected[0]: # Name of first File
profile = profile1
else:
profile = profile2
for record in SeqIO.parse(file, "fasta"): # reads fasta-file
sequence = str(record.seq)
seq_length = len(sequence)
if peak is not None:
if seq_length < peak: # is thrown if peak is greater than sequence length
raise InputValueException(
                    'ERROR: Invalid peak. Must be smaller than or equal to the sequence length. For help use option -h.')
sequence = createPeakPosition(peak, sequence)
if seq_length <= k:
raise InputValueException( # is thrown if k is greater or equal than sequence length
"ERROR: Invalid k. Must be smaller than sequence length. For help use option -h.")
# if structural data is available, generate alphabet
if no_sec_peak > -1:
if no_sec_peak is 1:
sequence = sequence.upper()
for c in sequence:
if c not in profile2:
profile2.append(c) # profile2 is used as alphabet if profiles for structural data are created
for i in range(0, (seq_length - k + 1)):
if i == 0:
kmer = sequence[0:k] # init first kmer
else:
kmer = ''.join([kmer[1:], sequence[k + i - 1]])
# if peak position for RNA-structure is needed, only save k-mer containing capital letter
                if no_sec_peak == 0 and not re.findall('[A-Z]', kmer):
continue
if profile.get(kmer) is not None:
profile[kmer] += 1
else:
profile[kmer] = 1
return [profile1, profile2]
# determines sequence length of first sequence of file
# it is assumed, that every sequence has same length
# file: not empty fasta file
def getSeqLength(file):
records = list(SeqIO.parse(file, "fasta").records)
seq_len = len(records[0].seq)
return seq_len
# changes sequence with only capital letters to sequence with only one capital letter (peak position)
# peak: peak-position, where sequences should be aligned
# seq: sequence
def createPeakPosition(peak, seq):
sequence = seq.lower()
peak_val = seq[peak - 1]
res = ''.join([sequence[:peak - 1], peak_val, sequence[peak:]])
return res
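# Illustrative sketch (not part of the original module): with peak=3 only the third base keeps
# its capitalisation, marking the aligned peak position.
def _demo_createPeakPosition():
    return createPeakPosition(3, 'ACGTAC')  # -> 'acGtac'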
# creates a table that contains kmer frequencies as coordinates (for each kmer: x = frequency in file1, y = frequency in file2)
def createDataFrame(p1, p2, selected):
x_axis = [] # frequency count from file 1
y_axis = [] # frequency count from file 2
kmer_list = []
profile1 = p1.getProfile()
profile2 = p2.getProfile()
file1_kmer = list(p1.getProfile().keys())
file2_kmer = list(p2.getProfile().keys())
# calculates coordinates
    intersec = set(file1_kmer).intersection(file2_kmer)  # determines kmers which appear in both files
# all kmers, which are in profile1 but not in profile2
p1_diff = set(file1_kmer).difference(file2_kmer)
# all kmers, which are in profile2 but not in profile1
p2_diff = set(file2_kmer).difference(file1_kmer)
for kmer in intersec:
x_axis.append(profile1[kmer])
y_axis.append(profile2[kmer])
kmer_list.append(kmer)
for k1 in p1_diff:
kmer_freq = profile1[k1]
x_axis.append(kmer_freq)
y_axis.append(0)
kmer_list.append(k1)
for k2 in p2_diff:
kmer_freq = profile2[k2]
x_axis.append(0)
y_axis.append(kmer_freq)
kmer_list.append(k2)
file_name1 = os.path.basename(selected[0])
file_name2 = os.path.basename(selected[1])
res = | pd.DataFrame(x_axis, index=kmer_list, columns=[file_name1]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# Created at UC Berkeley 2015
# Authors: <NAME>
# ==============================================================================
'''This code trains and evaluates a CRF model for MHG scansion based
on the paper presented at the NAACL-CLFL 2016 by <NAME> and
<NAME>. This model is for the held-out data.'''
import codecs
from pickle import dump
from CLFL_mdf_classification import confusion_matrix
import pycrfsuite
import numpy as np
import pandas as pd
from scan_text_rev import only_four_stresses
from process_feats import syllable2features, line2features, line2labels, line2tokens
from prep_crf import prep_crf
from new_bio_class_report import bio_classification_report
# open hand-tagged data
with open("Data/CLFL_dev-data.txt", 'r', encoding='utf-8') as f:
training_tagged = f.read()
# add features to data
ftuples = prep_crf(training_tagged)
# open hand-tagged data
with open("Data/CLFL_held-out.txt", 'r', encoding='utf-8') as f:
heldout = f.read()
# add features to data
htuples = prep_crf(heldout)
sylls_list = [[t[0] for t in l] for l in htuples]
test_lines = htuples
train_lines = ftuples
X_train = [line2features(s) for s in train_lines]
y_train = [line2labels(s) for s in train_lines]
X_test = [line2features(s) for s in test_lines]
y_test = [line2labels(s) for s in test_lines]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
# parameters to fiddle with
trainer.set_params({
'c1': 1.3, # coefficient for L1 penalty
'c2': 10e-4, # coefficient for L2 penalty
'num_memories': 6, # default is 6
# 'max_iterations': 100, # stop earlier
# include transitions that are possible, but not observed
'feature.possible_transitions': False,
# 'max_linesearch': 1000,
# 'linesearch': 'Backtracking'
# 'feature.minfreq': 5
# 'feature.possible_states': True,
})
# run trainer and tagger
trainer.params()
trainer.train('MHGMETRICS_heldout.crfsuite')
tagger = pycrfsuite.Tagger()
tagger.open('MHGMETRICS_heldout.crfsuite')
# y_pred = [tagger.tag(xseq) for xseq in X_test]
y_pred = only_four_stresses(X_test, tagger, sylls_list)
bioc = bio_classification_report(y_test, y_pred)
p, r, f1, s = bioc[0]
tot_avgs = []
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
tot_avgs.append(v)
toext = [0] * (len(s) - 3)
tot_avgs.extend(toext)
all_s = [sum(s)] * len(s)
rep = bioc[1]
all_labels = []
for word in rep.split():
if word.isupper():
all_labels.append(word)
ext_labels = [
"DOPPEL",
"EL",
"HALB",
"HALB_HAUPT",
"MORA",
"MORA_HAUPT"]
abs_labels = [l for l in ext_labels if l not in all_labels]
# print(bio_classification_report(y_test, y_pred)[1])
data = {
"labels": all_labels,
"precision": p,
"recall": r,
"f1": f1,
"support": s,
"tots": tot_avgs,
"all_s": all_s}
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
from collections import deque
import sys
def addExtension(tVal):
return str(tVal) + ".png"
if __name__ == '__main__':
dataDir = sys.argv[1]
timestampPath = sys.argv[2]
gyroPath = sys.argv[3]
# ----------#
timestampLabels = ["#timestamp [ns]", "filename"]
timestamps = pd.read_csv(timestampPath, header=None, dtype=str)
outputFilename = dataDir + "/timestamps.csv"
data = {
timestampLabels[0]: timestamps[0], timestampLabels[1]: timestamps[0] + ".png"
}
dataframe = pd.DataFrame(data=data)
dataframe.to_csv(outputFilename, sep=',', encoding='utf-8', index=False, line_terminator='\r\n')
# ----------#
imuLabels = [
"#timestamp [ns]",
"w_RS_S_x [rad s^-1]",
"w_RS_S_y [rad s^-1]",
"w_RS_S_z [rad s^-1]",
"a_RS_S_x [m s^-2]",
"a_RS_S_y [m s^-2]",
"a_RS_S_z [m s^-2]"
]
gyro = pd.read_csv(gyroPath, header=None)
outputFilename = dataDir + "/gyro_accel.csv"
if len(sys.argv) == 5:
accelPath = sys.argv[4]
accel = pd.read_csv(accelPath, header=None)
data = pd.merge_asof(gyro, accel, on=3, direction='nearest')
data = data.reindex(columns=[3, '0_x', '1_x', '2_x', '0_y', '1_y', '2_y'])
data.columns = [imuLabels[0], imuLabels[1], imuLabels[2], imuLabels[3], imuLabels[4], imuLabels[5], imuLabels[6]]
else:
data = {
imuLabels[0]: gyro[3],
imuLabels[1]: gyro[0],
imuLabels[2]: gyro[1],
imuLabels[3]: gyro[2],
imuLabels[4]: "",
imuLabels[5]: "",
imuLabels[6]: ""
}
dataframe = | pd.DataFrame(data=data) | pandas.DataFrame |
from pandas import Series, DataFrame
daeshin = {'open': [11650, 11100, 11200, 11100, 11000],
'high': [12100, 11800, 11200, 11100, 11150],
'low' : [11600, 11050, 10900, 10950, 10900],
'close': [11900, 11600, 11000, 11100, 11050]}
#daeshin_day = DataFrame(daeshin)
daeshin_day = | DataFrame(daeshin, columns=['open', 'high', 'low', 'close']) | pandas.DataFrame |
import logging
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pandas as pd
from msi_recal.join_by_mz import join_by_mz
from msi_recal.math import get_centroid_peaks, is_valid_formula_adduct
from msi_recal.mean_spectrum import hybrid_mean_spectrum
from msi_recal.params import RecalParams
logger = logging.getLogger(__name__)
def _spectral_score(ref_ints: np.ndarray, ints: np.ndarray):
"""Calculates a spectral score based on the relative intensities of isotopic peaks."""
if len(ref_ints) > 1:
# Sort peaks by decreasing predicted intensity and normalize relative to the first peak
order = np.argsort(ref_ints)[::-1]
ints = ints[order[1:]] / ints[order[0]]
ref_ints = ref_ints[order[1:]] / ref_ints[order[0]]
ints_ratio_error = np.abs(ints / (ints + ref_ints) - 0.5) * 2
return 1 - np.average(ints_ratio_error, weights=ref_ints)
else:
return 0
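# Illustrative sketch (made-up intensities, not part of the original module): a perfect match of
# the secondary isotope ratio scores 1.0, and a single reference peak always scores 0.
def _demo_spectral_score():
    perfect = _spectral_score(np.array([1.0, 0.5]), np.array([2.0, 1.0]))  # -> 1.0
    single = _spectral_score(np.array([1.0]), np.array([3.0]))             # -> 0
    return perfect, single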
def calc_spectral_scores(
spectrum, db_hits, params: RecalParams, sigma_1: float, limit_of_detection: float
) -> pd.DataFrame:
"""For each DB match, searches for isotopic peaks with the same approximate mass error and
calculates a spectral score"""
# Make list of expected isotopic peaks for each DB hit
spectral_peaks = []
for db_hit in db_hits.itertuples():
if 'coverage' in db_hits.columns:
min_abundance = min(limit_of_detection / db_hit.ints / db_hit.coverage, 0.9)
else:
min_abundance = min(limit_of_detection / db_hit.ints, 0.9)
mol_peaks = get_centroid_peaks(
db_hit.formula,
db_hit.adduct,
db_hit.charge,
min_abundance,
params.instrument_model,
)
# Recalc error as centroid may be slightly different to monoisotopic peak
mz_error = db_hit.mz - mol_peaks[0][0]
for mz, ref_ints in mol_peaks:
spectral_peaks.append((db_hit[0], mz + mz_error, ref_ints))
# Search for peaks in the spectrum
spectral_peaks = | pd.DataFrame(spectral_peaks, columns=['hit_index', 'ref_mz', 'ref_ints']) | pandas.DataFrame |
from league import League
import playerID
from authorize import Authorize
from team import Team
from player import Player
from utils.building_utils import getUrl
from itertools import chain
import pandas as pd
import numpy as np
import requests
import math
from tabulate import tabulate as table
import os
import sys
from fpdf import FPDF
import argparse
import progressbar
parser = argparse.ArgumentParser()
parser.add_argument("week", help='Get week of the season')
args = parser.parse_args()
week = int(args.week)
# Define user and season year
user_id = 'desi'
year = 2021
# Get login credentials for leagues
# login = pd.read_csv('C:\\Users\\desid\\Documents\\Fantasy_Football\\espn-api-v3\\login.csv')
# _, username, password, league_id, swid, espn_s2 = login[login['id'] == user_id].values[0]
username = 'cgeer98'
password = '<PASSWORD>'
league_id = 916709
swid = '{75C7094F-C467-4145-8709-4FC467C1457E}'
espn_s2 = 'AECbQaX7HoUGyJ5X5cmNlFHVs%2FmDl0RKfnVV%2FazefK9PxoSfENQFF6ULNnR421xium4UYV5dC0GsOhS%2BeigBuhk1abpSjhlXDCJnIGt0PjUHCZpV6qF5S9qMS40ichi2XnVZFSKwAid6h8bFbWA4eHclC%2BJHqMyirQ85yLRG6zc6nULRaovpF2Cx2j5U55OuvwTnI2HCztRnEJIVucnKxlem7pAidup27BIggM3c42%2BrH7vXUlRaIYXhjE%2BGH3cWbL88H8AcpIQpG%2Bft96vAZXuB'
root = '/Users/christiangeer/Fantasy_Sports/football/power_rankings/espn-api-v3/'
# Generate cookies payload and API endpoint
cookies = {'swid' : swid, 'espn_s2' : espn_s2}
url = getUrl(year, league_id)
league = League(league_id, year, username, password, swid, espn_s2)
print(league, "\n")
# import dynasty process values
# dynastyProcessValues = pd.read_csv("/Users/christiangeer/Fantasy_Sports/Fantasy_FF/data/files/values-players.csv")
# dynastyProcessValues = dynastyProcessValues[["player","value_1qb"]]
# create for loop to add team names from team objects into list
teams = league.teams
teams_list = list(teams.values())
team_names = []
for team in teams_list:
team_name = team.teamName
team_names.append(team_name)
# create list of the weekly scores for the season
seasonScores = []
for team in teams_list:
weeklyScores = team.scores
seasonScores.append(weeklyScores)
# turn into dataframes
seasonScores_df = | pd.DataFrame(data=seasonScores) | pandas.DataFrame |
from collections import Counter
from sklearn.cross_validation import cross_val_score
import pandas as pd
import numpy as np
# pandas: importing the data frame (df)
df = | pd.read_csv('situacao_cliente.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import datetime as dt
import math
# inputs: H (holding period) and the source data file
def cal_riskrt(H,source):
source=source.iloc[:,0:6]
source=source.drop(columns=["Unnamed: 0"])
source=source.set_index('date').dropna(subset=['long_rt','short_rt','long_short_rt'],how='all')
    # create a DataFrame to record the various metrics
df=pd.DataFrame(columns=['rt','volatility','mdd','sharpe','calmar'],index=['long','short','long_short','excess'])
    # compute the metrics of the long portfolio
rt=pd.DataFrame(source['long_rt'])
rt['prod'] = np.cumprod(rt['long_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
    # annualized return
    annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
    # annualized volatility
    volatility = rt['long_rt'].std() * (math.sqrt(250 / H))
    # Sharpe ratio
    sharpe = annual_ret / volatility
    # maximum drawdown
    rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['long','rt']=annual_ret
df.loc['long','volatility']=volatility
df.loc['long','mdd']=mdd
df.loc['long','sharpe']=sharpe
df.loc['long','calmar']=calmar
    # compute the metrics of the short portfolio (control group)
rt = pd.DataFrame(source['short_rt'])
rt['short_rt']=rt['short_rt']
rt['prod'] = np.cumprod(rt['short_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
    # annualized return
    annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
    # annualized volatility
    volatility = rt['short_rt'].std() * (math.sqrt(250 / H))
    # Sharpe ratio
    sharpe = annual_ret / volatility
    # maximum drawdown
    rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['short', 'rt'] = annual_ret
df.loc['short', 'volatility'] = volatility
df.loc['short', 'mdd'] = mdd
df.loc['short', 'sharpe'] = sharpe
df.loc['short', 'calmar'] = calmar
    # compute the metrics of the long-short portfolio
rt = pd.DataFrame(source['long_short_rt'])
rt['long_short_rt'] = rt['long_short_rt']
rt['prod'] = np.cumprod(rt['long_short_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
    # annualized return
    annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
    # annualized volatility
    volatility = rt['long_short_rt'].std() * (math.sqrt(250 / H))
    # Sharpe ratio
    sharpe = annual_ret / volatility
    # maximum drawdown
    rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['long_short', 'rt'] = annual_ret
df.loc['long_short', 'volatility'] = volatility
df.loc['long_short', 'mdd'] = mdd
df.loc['long_short', 'sharpe'] = sharpe
df.loc['long_short', 'calmar'] = calmar
    # compute the metrics of the excess return (long minus benchmark)
rt = pd.DataFrame(source['long_rt']-source['benchmark'])
rt.columns=['excess_rt']
rt['prod'] = np.cumprod(rt['excess_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
    # annualized return
    annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
    # annualized volatility
    volatility = rt['excess_rt'].std() * (math.sqrt(250 / H))
    # Sharpe ratio
    sharpe = annual_ret / volatility
    # maximum drawdown
    rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['excess', 'rt'] = annual_ret
df.loc['excess', 'volatility'] = volatility
df.loc['excess', 'mdd'] = mdd
df.loc['excess', 'sharpe'] = sharpe
df.loc['excess', 'calmar'] = calmar
return df
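# Worked example of the annualization above (illustrative numbers only): with a cumulative growth
# factor of 1.10 over a 200-day holding period, annual_ret = 1.10 ** (365 / 200) - 1 ≈ 0.19; with a
# per-period std of 0.02 and H = 20, volatility = 0.02 * sqrt(250 / 20) ≈ 0.071.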
rt_df=pd.read_csv("../draw/inv_level_H30.csv")
risk_rt=cal_riskrt(20,rt_df)
risk_rt.to_csv("inv_level.csv")
rt_df=pd.read_csv("../draw/warehouseR90H5.csv")
risk_rt=cal_riskrt(5,rt_df)
risk_rt.to_csv("warehouse.csv")
rt_df= | pd.read_csv("../draw/rollrt2H35.csv") | pandas.read_csv |
####################################################################################################
"""
dashboard.py
This script implements a dashboard-application for the efficient planning of the municipal
enforcement process, based on housing fraud signals, within the municipality of Amsterdam.
<NAME> & <NAME> 2019
Basic intro on working with Dash: https://dash.plot.ly/getting-started
Example dashboards using maps in Dash (from dash-gallery.plotly.host/Portal):
github.com/plotly/dash-sample-apps/blob/master/apps/dash-oil-and-gas/app.py
github.com/plotly/dash-oil-gas-ternary
This dashboard took some inspiration from this video:
https://www.youtube.com/watch?v=lu0PtsMor4E
Inspiration has also been taken from the corresponding codebase:
https://github.com/amyoshino/Dash_Tutorial_Series (careful: this repo seems to be full of errors!!)
"""
####################################################################################################
#############
## Imports ##
#############
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash.dependencies import Input, Output, State, ClientsideFunction
import pandas as pd
import urllib
import json
import sys
import os
import re
import q
from copy import deepcopy
import plotly.graph_objs as go
# Add the parent paths to sys.path, so our own modules on the root dir can also be imported.
SCRIPT_PATH = os.path.abspath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
PARENT_PATH = os.path.join(SCRIPT_DIR, os.path.pardir)
sys.path.append(PARENT_PATH)
# Import own modules.
import config
import dashboard_helper
#################################
## Load server or mock-up data ##
#################################
# Try to create a list of 100 meldingen from the data.
try:
df = dashboard_helper.process_recent_signals()
print('Succesfully created prediction for recent signals.')
except:
df = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset.csv'), sep=';', skipinitialspace=True)
print('Cannot generate predictions from the data. Falling back to using the mockup_dataset.csv')
df_proactief = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset_proactief.csv'), sep=';', skipinitialspace=True)
df_unsupervised = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset_unsupervised.csv'), sep=';', skipinitialspace=True)
#########################
## Define site visuals ##
#########################
colors = {'paper': '#DDDDDD',
'background': '#F2F2F2',
'container_background': '#F9F9F9',
'text': '#1E4363',
'marker': '#1E4363',
'fraud': 'rgb(200, 50, 50)',
'no_fraud': 'rgb(150, 150, 150)',
'selected': 'rgb(75, 75, 75)',
}
###############################
## Set some global variables ##
###############################
# Get dictionary of columns for DataTable.
SELECTED_COLUMNS = ['fraude_kans', 'woonfraude', 'adres_id', 'sdl_naam', 'categorie', 'eigenaar']
TABLE_COLUMNS = [{'name': i, 'id': i} for i in SELECTED_COLUMNS]
# Define styling for the first column (fraude_kans), to reduce the decimals after comma.
TABLE_COLUMNS[0]['name'] = 'Fraude kans (%)'
TABLE_COLUMNS[0]['type'] = 'numeric'
TABLE_COLUMNS[0]['format'] = FormatTemplate.percentage(2)
##########################
## Define the dashboard ##
##########################
# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app = dash.Dash(__name__)
server = app.server
app.title = 'Woonfraude Dashboard'
# Defines the meldingen tab.
meldingen_tab = html.Div(
[
# Div containing a selection of the data based on dropdown selection.
html.Div(id='intermediate_value', style={'display': 'none'}),
# Divs contain a lists of points which have been selected with on-clicks on the map.
html.Div(id='point_selection', style={'display': 'none'}),
html.Div(id='filtered_point_selection', style={'display': 'none'}),
# Row containing filters, info boxes, and map.
html.Div(
[
# Filters div.
html.Div(
[
# Create drop down filter for categories.
html.P('Selecteer categorieën:', className="control_label"),
dcc.Dropdown(
id='categorie_dropdown',
placeholder='Selecteer categorieën',
options=[{'label': x, 'value': x} for x in sorted(df.categorie.unique())],
multi=True,
value=df.categorie.unique(),
),
# Create drop down filter for city parts.
html.P('Selecteer stadsdelen:', className="control_label"),
dcc.Dropdown(
id='stadsdeel_dropdown',
placeholder='Selecteer stadsdelen',
options=[{'label': x, 'value': x} for x in sorted(df.sdl_naam.unique())],
multi=True,
value=sorted(df.sdl_naam.unique()),
),
# Show info of items selected on map (using click).
html.Div(
[
html.P('Geselecteerde adressen:', className="control_label"),
dt.DataTable(
id='filtered_point_selection_table',
columns = TABLE_COLUMNS[1:-1],
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
},
]
),
],
),
# Link to download csv with all selected addresses.
html.A(
'Download lijst geselecteerde adressen (CSV)',
id='download_selected_addresses_list',
download="geselecteerde_adressen.csv",
href="",
target="_blank",
),
# Button test.
html.P(''),
html.Button('Test', id='button'),
html.P('', id='button_n_clicks')
],
id='leftCol',
className="pretty_container four columns",
),
# Widgets and map div.
html.Div(
[
# # Row with 4 statistics widgets
# html.Div(
# [
# # Aantal meldingen (info box).
# html.Div(
# [
# html.P("Aantal meldingen"),
# html.H6(
# id="aantal_meldingen",
# className="info_text"
# )
# ],
# className="pretty_container"
# ),
# # Percentage fraude verwacht (info box).
# html.Div(
# [
# html.P("% Fraude verwacht"),
# html.H6(
# id="percentage_fraude_verwacht",
# className="info_text"
# )
# ],
# className="pretty_container"
# ),
# # Aantal geselecteerde meldingen (info box).
# html.Div(
# [
# html.P("Aantal geselecteerde meldingen"),
# html.H6(
# id="aantal_geselecteerde_meldingen",
# className="info_text"
# )
# ],
# className="pretty_container",
# style={'backgroundColor': '#F7D7D7'}
# ),
# # Percentage fraude verwacht bij geselecteerde meldingen (info box).
# html.Div(
# [
# html.P("% Fraude verwacht bij geselecteerde meldingen"),
# html.H6(
# id="percentage_fraude_verwacht_geselecteerd",
# className="info_text"
# )
# ],
# className="pretty_container",
# style={'backgroundColor': '#F7D7D7'}
# ),
# ],
# id="infoContainer",
# className="row"
# ),
# Map with selectable points.
html.Div(
dcc.Graph(
id='map',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
# style={'height': 500}
),
],
id="rightCol",
className="eight columns"
),
],
className="row",
),
# Data table div.
html.Div(
[
# Filtered entries data table.
html.Div(
[
html.P('Gefilterde meldingen'),
dt.DataTable(
id='filtered_table',
columns = TABLE_COLUMNS,
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
# filter_action='native', # Maybe turn off? A text field to filter feels clunky..
# row_selectable='multi',
# selected_rows=[],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
}
]
),
],
className="pretty_container eight columns",
),
# Filtered entries stadsdeel split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Stadsdeel split"),
dcc.Graph(
id="stadsdeel_split",
config={'displayModeBar': False},
)
],
id="stadsdeel",
className="pretty_container two columns"
),
# Filtered entries categorie split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Categorie split"),
dcc.Graph(
id="categorie_split",
config={'displayModeBar': False},
)
],
id="categorie",
className="pretty_container two columns"
),
],
className="row"
),
],
id="mainContainer",
style={
"display": "flex",
"flex-direction": "column"
}
)
# Defines the proactief tab.
proactief_tab = html.Div(
[
# For creating a map_proactief callback function with an empty input.
html.Div(id='none_proactief',children=[],style={'display': 'none'}),
# Div for containing a selection of the data based on filters.
html.Div(id='intermediate_value_proactief', style={'display': 'none'}),
# Row containing filters, info boxes, and map.
html.Div(
[
# Filters div.
html.Div(
[
# Create range slider for number of meldingen.
html.P('Minimaal aantal meldingen op adres:', className="control_label"),
dcc.RangeSlider(
id='aantal_meldingen_rangeslider_proactief',
min=min(df_proactief.aantal_meldingen),
max=max(df_proactief.aantal_meldingen),
marks={i: f"{i}" for i in range(min(df_proactief.aantal_meldingen), max(df_proactief.aantal_meldingen)+1)},
value=[min(df_proactief.aantal_meldingen), max(df_proactief.aantal_meldingen)]
),
# Padding (temporary hack)
html.P(' '),
# Create slider for number of adults.
html.P('Aantal volwassenen', className="control_label"),
dcc.RangeSlider(
id='aantal_volwassenen_rangeslider_proactief',
min=min(df_proactief.aantal_volwassenen),
max=max(df_proactief.aantal_volwassenen),
marks={i: f"{i}" for i in range(min(df_proactief.aantal_volwassenen), max(df_proactief.aantal_volwassenen)+1)},
value=[min(df_proactief.aantal_volwassenen), max(df_proactief.aantal_volwassenen)]
),
# Padding (temporary hack)
html.P(' '),
# Create m2 per person slider.
html.P('Aantal m2 per persoon:', className="control_label"),
dcc.RangeSlider(
id='aantal_m2_per_persoon_rangeslider_proactief',
min=min(df_proactief.m2_per_persoon),
max=max(df_proactief.m2_per_persoon),
marks={i: f"{i}" for i in range(min(df_proactief.m2_per_persoon), max(df_proactief.m2_per_persoon)+1, 3)},
value=[min(df_proactief.m2_per_persoon), max(df_proactief.m2_per_persoon)]
),
# Padding (temporary hack)
html.P(' '),
# Create drop down filter for city parts.
html.P('Selecteer stadsdelen:', className="control_label"),
dcc.Dropdown(
id='stadsdeel_dropdown_proactief',
placeholder='Selecteer stadsdelen',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.sdl_naam.unique())],
multi=True,
value=sorted(df_proactief.sdl_naam.unique()),
),
# Create hotline dropdown.
html.P('Is hotline melding:', className="control_label"),
dcc.Dropdown(
id='hotline_dropdown_proactief',
placeholder='Selecteer waarden',
options=[{'label': 'Ja', 'value': 'True'}, {'label': 'Nee', 'value': 'False'}],
multi=True,
value=['True', 'False']
),
# Create gebruikersdoel dropdown.
html.P('Selecteer gebruikersdoel:', className="control_label"),
dcc.Dropdown(
id='gebruikersdoel_dropdown_proactief',
placeholder='Selecteer gebruikersdoel',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.gebruikersdoel.unique())],
multi=True,
value=sorted(df_proactief.gebruikersdoel.unique()),
),
# Create profiel dropdown.
html.P('Selecteer profiel:', className="control_label"),
dcc.Dropdown(
id='profiel_dropdown_proactief',
placeholder='Selecteer profiel',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.profiel.unique())],
multi=True,
value=sorted(df_proactief.profiel.unique()),
),
],
id='leftCol_proactief',
className="pretty_container four columns",
),
# Map div.
html.Div(
[
# Map with selectable points.
html.Div(
dcc.Graph(
id='map_proactief',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
# style={'height': 500}
),
],
id="rightCol_proactief",
className="eight columns"
),
],
className="row",
),
# Data table div.
html.Div(
[
# Filtered entries data table.
html.Div(
[
html.P('Gefilterde meldingen'),
dt.DataTable(
id='filtered_table_proactief',
columns = TABLE_COLUMNS,
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
# filter_action='native', # Maybe turn off? A text field to filter feels clunky..
# row_selectable='multi',
# selected_rows=[],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
}
]
),
],
className="pretty_container ten columns",
),
# Filtered entries stadsdeel split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Stadsdeel split"),
dcc.Graph(
id="stadsdeel_split_proactief",
config={'displayModeBar': False},
)
],
id="stadsdeel_proactief",
className="pretty_container two columns"
),
],
className="row"
),
# html.Div(
# dcc.Graph(
# id='map_proactief',
# config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
# ),
# className="pretty_container",
# ),
],
style={
"display": "flex",
"flex-direction": "column"
}
)
# Defines the unsupervised tab.
unsupervised_tab = html.Div(
[
# For creating a map_unsupervised callback function with an empty input.
html.Div(id='none_unsupervised',children=[],style={'display': 'none'}),
# Div for containing a selection of the data based on filters.
html.Div(id='intermediate_value_unsupervised', style={'display': 'none'}),
html.Div(
dcc.Graph(
id='map_unsupervised',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
),
],
style={
"display": "flex",
"flex-direction": "column"
}
)
# Combines the two tabs into a single app.
app.layout = html.Div([
# Title
html.H1("Woonfraude Dashboard", style={'textAlign': 'center'}),
# Tabs for meldingen & proactieve handhaving.
dcc.Tabs(id='tabs', value='meldingen_tab', children=[
dcc.Tab(label='Meldingen', value='meldingen_tab', children=[meldingen_tab]),
dcc.Tab(label='Proactieve handhaving', value='proactief_tab', children=[proactief_tab]),
dcc.Tab(label='Unsupervised', value='unsupervised_tab', children=[unsupervised_tab]),
])
])
# Updates the intermediate data based on the dropdown selection.
@app.callback(
Output('intermediate_value', 'children'),
[Input('categorie_dropdown', 'value'),
Input('stadsdeel_dropdown', 'value')]
)
def create_data_selection(selected_categories, selected_stadsdelen):
# Create a copy of the original dataframe.
df_filtered = deepcopy(df)
# Filter the original dataframe by selected categories.
df_filtered = df_filtered[df_filtered.categorie.isin(selected_categories)]
# Filter the dataframe by selected stadsdelen.
df_filtered = df_filtered[df_filtered.sdl_naam.isin(selected_stadsdelen)]
return df_filtered.to_json(date_format='iso', orient='split')
'''
# Updates the aantal_meldingen info box.
@app.callback(
Output('aantal_meldingen', 'children'),
[Input('intermediate_value', 'children')]
)
def count_items(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
return len(df)
# Updates the percentage_fraude_verwacht info box.
@app.callback(
Output('percentage_fraude_verwacht', 'children'),
[Input('intermediate_value', 'children')]
)
def compute_fraud_percentage(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Compute what percentage of cases is expected to be fraudulent. If/else to prevent division by 0.
if len(df.woonfraude) > 0:
fraude_percentage = len(df.woonfraude[df.woonfraude == True]) / len(df.woonfraude) * 100
else:
fraude_percentage = 0
# Return truncated value (better for printing on dashboard)
return round(fraude_percentage, 1)
# Updates the aantal_geselecteerde_meldingen info box.
@app.callback(
Output('aantal_geselecteerde_meldingen', 'children'),
[Input('filtered_point_selection', 'children')]
)
def count_items_selected(filtered_point_selection):
# Just return the amount of filtered selected points.
return len(filtered_point_selection)
# Updates the percentage_fraude_verwacht_geselecteerd info box.
@app.callback(
Output('percentage_fraude_verwacht_geselecteerd', 'children'),
[Input('intermediate_value', 'children'),
Input('filtered_point_selection', 'children')]
)
def compute_fraud_percentage_selected(intermediate_value, filtered_point_selection):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Reduce the dataframe using the point selection.
df = df[df.adres_id.isin(filtered_point_selection)]
# Compute what percentage of cases is expected to be fraudulent. If/else to prevent division by 0.
if len(df.woonfraude) > 0:
fraude_percentage = len(df.woonfraude[df.woonfraude == True]) / len(df.woonfraude) * 100
else:
fraude_percentage = 0
# Return truncated value (better for printing on dashboard)
return round(fraude_percentage, 1)
'''
# Updates the map based on dropdown-selections.
@app.callback(
Output('map', 'figure'),
[Input('intermediate_value', 'children'),
Input('point_selection', 'children')],
[State('map', 'figure')]
)
def plot_map(intermediate_value, point_selection, map_state):
# Define which input triggers the callback (map.figure or intermediate_value.children).
trigger_event = dash.callback_context.triggered[0]['prop_id']
# Load the pre-filtered version of the dataframe.
df_map = pd.read_json(intermediate_value, orient='split')
# Select positive and negative samples for plotting.
pos = df_map[df_map.woonfraude==True]
neg = df_map[df_map.woonfraude==False]
# Create a df of the selected points, for highlighting.
selected_point_ids = [int(x) for x in point_selection]
sel = df_map.loc[df_map.adres_id.isin(selected_point_ids)]
# Create texts for when hovering the mouse over items.
def make_hover_string(row):
return f"Adres id: {row.adres_id}\
<br>Categorie: {row.categorie}\
<br>Aantal inwoners: {row.aantal_personen}\
<br>Aantal achternamen: {row.aantal_achternamen}\
<br>Eigenaar: {row.eigenaar}"
pos_text = pos.apply(make_hover_string, axis=1)
neg_text = neg.apply(make_hover_string, axis=1)
sel_text = sel.apply(make_hover_string, axis=1)
figure={
'data': [
# Plot border for selected samples (plot first, so its behind the pos/neg samples).
go.Scattermapbox(
name='Geselecteerd',
lat=sel['wzs_lat'],
lon=sel['wzs_lon'],
text=sel_text,
mode='markers',
marker=dict(
size=17,
color=colors['selected'],
),
),
# Plot positive samples.
go.Scattermapbox(
name='Woonfraude verwacht',
lat=pos['wzs_lat'],
lon=pos['wzs_lon'],
text=pos_text,
hoverinfo='text',
mode='markers',
marker=dict(
size=12,
color=colors['fraud'],
),
),
# Plot negative samples.
go.Scattermapbox(
name='Geen woonfraude verwacht',
lat=neg['wzs_lat'],
lon=neg['wzs_lon'],
text=neg_text,
hoverinfo='text',
mode='markers',
marker=dict(
size=12,
color=colors['no_fraud'],
),
),
],
'layout': go.Layout(
uirevision='never',
autosize=True,
hovermode='closest',
# width=1000,
height=700,
margin=go.layout.Margin(l=0, r=0, b=0, t=0, pad=0),
showlegend=False, # Set to False, since legend selection breaks custom point selection.
legend=dict(orientation='h'),
plot_bgcolor=colors['background'],
paper_bgcolor=colors['paper'],
mapbox=dict(
accesstoken=config.mapbox_access_token,
style="light",
center=dict(
lat=52.36,
lon=4.89
),
zoom=11,
),
)
}
return figure
# Updates the table showing all data points after dropdown-selections.
@app.callback(
Output('filtered_table', 'data'),
[Input('intermediate_value', 'children')]
)
def generate_filtered_table(intermediate_value):
# Load the pre-filtered version of the dataframe.
df_table = pd.read_json(intermediate_value, orient='split')
# Transform True and False boolean values to strings.
df_table.woonfraude = df_table.woonfraude.replace({True: 'True', False: 'False'})
# Only use a selection of the columns.
df_table = df_table[SELECTED_COLUMNS]
# Create a table, with all positive woonfraude examples at the top.
columns = [{"name": i, "id": i} for i in df_table.columns]
data = df_table.to_dict('records')
return data
# Enable the selection of map points using click-events.
@app.callback(
Output('point_selection', 'children'),
[Input('map', 'clickData'),
Input('intermediate_value', 'children')],
[State('point_selection', 'children')]
)
def update_point_selection_on_click(clickData, intermediate_value, existing_point_selection):
"""
Update point selection with newly selected points, or according to dropdown filters.
The input "intermediate_value:children" is only used to activate a callback.
"""
# Define which input triggers the callback (map.clickData or intermediate_value.children).
trigger_event = dash.callback_context.triggered[0]['prop_id']
# Re-use previous point selection (if it already existed).
point_selection = []
if existing_point_selection != None:
point_selection = existing_point_selection
# Add a clicked point to the selection, or remove it when it already existed in the selection.
if trigger_event == 'map.clickData':
if clickData != None:
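# Illustrative sketch (an assumption, not taken from the original code) of the clickData payload
# parsed below; the exact structure depends on the Plotly/Dash versions in use:
#   clickData = {'points': [{'text': 'Adres id: 12345<br>Categorie: ...', ...}]}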
point_id = re.match(r"Adres id: (\d+)", clickData['points'][0]['text']).group(1)
if point_id in point_selection:
point_selection.remove(point_id)
else:
point_selection.append(point_id)
return point_selection
# Create a filtered version of the point_selection, based on the categorie and stadsdeel filters.
@app.callback(
Output('filtered_point_selection', 'children'),
[Input('point_selection', 'children'),
Input('intermediate_value', 'children')]
)
def show_selected(existing_point_selection, intermediate_value):
# Re-use previous point selection (if it already existed).
point_selection = []
if existing_point_selection != None:
point_selection = existing_point_selection
# Filter any previously selected points, if the dropdown selections rule them out.
df = pd.read_json(intermediate_value, orient='split') # Load the pre-filtered version of the dataframe.
point_ids_list = [str(x) for x in list(df.adres_id)]
# Rebuild the list instead of removing items while iterating over it, which would skip elements.
point_selection = [point_id for point_id in point_selection if point_id in point_ids_list]
return point_selection
# Updates the table showing a list of the selected & filtered points.
@app.callback(
Output('filtered_point_selection_table', 'data'),
[Input('intermediate_value', 'children'),
Input('filtered_point_selection', 'children')]
)
def generate_filtered_point_selection_table(intermediate_value, filtered_point_selection):
# First check if any points have been selected.
if filtered_point_selection == []:
return []
else:
# Turn list of point_ids into a list of numbers instead of strings
point_selection = [int(x) for x in filtered_point_selection]
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Reduce the dataframe using the point selection.
df = df[df.adres_id.isin(point_selection)]
# Transform True and False boolean values to strings.
df.woonfraude = df.woonfraude.replace({True: 'True', False: 'False'})
# Only use a selection of the columns.
df = df[SELECTED_COLUMNS]
# Create a table, with all positive woonfraude examples at the top.
columns = [{"name": i, "id": i} for i in df.columns]
data = df.to_dict('records')
return data
# TODO: CHANGE WHEN THE DOWNLOAD LINK IS UPDATED WITH NEW DATA.
# NOW THIS CODE BELOW IS RAN EVERY TIME A POINT IS (DE)SELECTED,
# THIS IS TERRIBLY INEFFICIENT. ACCEPTABLE FOR THE MVP, BUT SHOULD BE CHANGED.
# Creates a download link for the filtered_point_selection_table data.
@app.callback(
Output('download_selected_addresses_list', 'href'),
[Input('filtered_point_selection_table', 'data')])
def update_download_link(filtered_point_selection_table):
"""Updates the csv download link with the data in the filtered point selection table."""
if filtered_point_selection_table == []:
point_selection = []
else:
# Use the rows of the filtered table (a list of dicts) directly as the selection.
point_selection = filtered_point_selection_table
# Convert to df, then to csv string, then return for downloading.
df = pd.DataFrame(point_selection)
csv_string = df.to_csv(index=False, encoding='utf-8', sep=';')
csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
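# The href is a data-URI ("data:text/csv;charset=utf-8,<url-encoded CSV>"), so clicking the link
# lets the browser download the current table contents directly.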
return csv_string
# Test for our button output.
@app.callback(
Output('button_n_clicks', 'children'),
[Input('button', 'n_clicks')])
def show_number_of_button_clicks(button_n_clicks):
return str(button_n_clicks)
# Updates the stadsdeel split PIE chart.
@app.callback(
Output('stadsdeel_split', 'figure'),
[Input('intermediate_value', 'children')]
)
def make_stadsdeel_pie_chart(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from contextlib import contextmanager
from datetime import timedelta
from functools import partial
import pickle
import sys
from types import GetSetDescriptorType
from unittest import TestCase
import uuid
import warnings
from nose_parameterized import parameterized
from numpy import full, int32, int64
import pandas as pd
from pandas.util.testing import assert_frame_equal
from six import PY2, viewkeys
import sqlalchemy as sa
from zipline.assets import (
Asset,
Equity,
Future,
AssetDBWriter,
AssetFinder,
)
from zipline.assets.synthetic import (
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
)
from six import itervalues, integer_types
from toolz import valmap
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
SQLITE_MAX_VARIABLE_NUMBER,
)
from zipline.assets.asset_db_schema import ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import (
downgrade
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
AssetDBVersionError,
SidsNotFound,
SymbolNotFound,
AssetDBImpossibleDowngrade,
ValueNotFoundForField,
)
from zipline.testing import (
all_subindices,
empty_assets_db,
parameter_space,
tmp_assets_db,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
WithTradingCalendars,
)
from zipline.utils.range import range
@contextmanager
def build_lookup_generic_cases(asset_finder_type):
"""
Generate test cases for the type of asset finder specified by
asset_finder_type for test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'duplicated',
'start_date': dupe_0_start.value,
'end_date': dupe_0_end.value,
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'duplicated',
'start_date': dupe_1_start.value,
'end_date': dupe_1_end.value,
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'unique',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'TEST',
},
],
index='sid'
)
fof14_sid = 10000
futures = pd.DataFrame.from_records(
[
{
'sid': fof14_sid,
'symbol': 'FOF14',
'root_symbol': 'FO',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'FUT',
},
],
index='sid'
)
root_symbols = pd.DataFrame({
'root_symbol': ['FO'],
'root_symbol_id': [1],
'exchange': ['CME'],
})
with tmp_assets_db(
equities=equities, futures=futures, root_symbols=root_symbols) \
as assets_db:
finder = asset_finder_type(assets_db)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
fof14 = finder.retrieve_asset(fof14_sid)
cf = finder.create_continuous_future(
root_symbol=fof14.root_symbol, offset=0, roll_style='volume',
)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
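# Each case yielded below is a tuple of (finder, query, as_of_date, expected_result), where
# `query` may be an Asset, a sid, a symbol, a ContinuousFuture, or an iterable mixing these.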
yield (
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'DUPLICATED', dupe_0_start, dupe_0),
(finder, 'DUPLICATED', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'UNIQUE', unique_start, unique),
(finder, 'UNIQUE', None, unique),
# Futures
(finder, 'FOF14', None, fof14),
# Future symbols should be unique, but including as_of date
# make sure that code path is exercised.
(finder, 'FOF14', unique_start, fof14),
# Futures int
(finder, fof14_sid, None, fof14),
# Future symbols should be unique, but including as_of date
# make sure that code path is exercised.
(finder, fof14_sid, unique_start, fof14),
# ContinuousFuture
(finder, cf, None, cf),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('DUPLICATED', 'UNIQUE'), dupe_0_start, [dupe_0, unique]),
(finder, ('DUPLICATED', 'UNIQUE'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('DUPLICATED', 2, 'UNIQUE', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
# Futures and Equities
(finder, ['FOF14', 0], None, [fof14, assets[0]]),
# ContinuousFuture and Equity
(finder, [cf, 0], None, [cf, assets[0]]),
)
class AssetTestCase(TestCase):
# Dynamically list the Asset properties we want to test.
asset_attrs = [name for name, value in vars(Asset).items()
if isinstance(value, GetSetDescriptorType)]
# Very wow
asset = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
auto_close_date=pd.Timestamp('2014-06-26 11:21AM', tz='UTC'),
exchange='THE MOON',
)
asset3 = Asset(3, exchange="test")
asset4 = Asset(4, exchange="test")
asset5 = Asset(5, exchange="still testing")
def test_asset_object(self):
the_asset = Asset(5061, exchange="bar")
self.assertEquals({5061: 'foo'}[the_asset], 'foo')
self.assertEquals(the_asset, 5061)
self.assertEquals(5061, the_asset)
self.assertEquals(the_asset, the_asset)
self.assertEquals(int(the_asset), 5061)
self.assertEquals(str(the_asset), 'Asset(5061)')
def test_to_and_from_dict(self):
asset_from_dict = Asset.from_dict(self.asset.to_dict())
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_from_dict, attr),
)
def test_asset_is_pickleable(self):
asset_unpickled = pickle.loads(pickle.dumps(self.asset))
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_unpickled, attr),
)
def test_asset_comparisons(self):
s_23 = Asset(23, exchange="test")
s_24 = Asset(24, exchange="test")
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertEqual(int32(23), s_23)
self.assertEqual(int64(23), s_23)
self.assertEqual(s_23, int32(23))
self.assertEqual(s_23, int64(23))
# Check all int types (includes long on py2):
for int_type in integer_types:
self.assertEqual(int_type(23), s_23)
self.assertEqual(s_23, int_type(23))
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
# Compare to a value that doesn't fit into a platform int:
self.assertNotEqual(s_23, sys.maxsize + 1)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(self.asset3 < self.asset4)
self.assertFalse(self.asset4 < self.asset4)
self.assertFalse(self.asset5 < self.asset4)
def test_le(self):
self.assertTrue(self.asset3 <= self.asset4)
self.assertTrue(self.asset4 <= self.asset4)
self.assertFalse(self.asset5 <= self.asset4)
def test_eq(self):
self.assertFalse(self.asset3 == self.asset4)
self.assertTrue(self.asset4 == self.asset4)
self.assertFalse(self.asset5 == self.asset4)
def test_ge(self):
self.assertFalse(self.asset3 >= self.asset4)
self.assertTrue(self.asset4 >= self.asset4)
self.assertTrue(self.asset5 >= self.asset4)
def test_gt(self):
self.assertFalse(self.asset3 > self.asset4)
self.assertFalse(self.asset4 > self.asset4)
self.assertTrue(self.asset5 > self.asset4)
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(self.asset3 < 'a')
self.assertIsNotNone('a' < self.asset3)
else:
with self.assertRaises(TypeError):
self.asset3 < 'a'
with self.assertRaises(TypeError):
'a' < self.asset3
class TestFuture(WithAssetFinder, ZiplineTestCase):
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
2468: {
'symbol': 'OMH15',
'root_symbol': 'OM',
'notice_date': pd.Timestamp('2014-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2014-02-20', tz='UTC'),
'auto_close_date': pd.Timestamp('2014-01-18', tz='UTC'),
'tick_size': .01,
'multiplier': 500.0,
'exchange': "TEST",
},
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'multiplier': 1.0,
'exchange': 'TEST',
},
},
orient='index',
)
@classmethod
def init_class_fixtures(cls):
super(TestFuture, cls).init_class_fixtures()
cls.future = cls.asset_finder.lookup_future_symbol('OMH15')
cls.future2 = cls.asset_finder.lookup_future_symbol('CLG06')
def test_str(self):
strd = str(self.future)
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = repr(self.future)
self.assertIn("Future", reprd)
self.assertIn("2468", reprd)
self.assertIn("OMH15", reprd)
self.assertIn("root_symbol=%s'OM'" % ('u' if PY2 else ''), reprd)
self.assertIn(
"notice_date=Timestamp('2014-01-20 00:00:00+0000', tz='UTC')",
reprd,
)
self.assertIn(
"expiration_date=Timestamp('2014-02-20 00:00:00+0000'",
reprd,
)
self.assertIn(
"auto_close_date=Timestamp('2014-01-18 00:00:00+0000'",
reprd,
)
self.assertIn("tick_size=0.01", reprd)
self.assertIn("multiplier=500", reprd)
def test_reduce(self):
assert_equal(
pickle.loads(pickle.dumps(self.future)).to_dict(),
self.future.to_dict(),
)
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
for field in _futures_defaults.keys():
self.assertTrue(field in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
def test_lookup_future_symbol(self):
"""
Test the lookup_future_symbol method.
"""
om = TestFuture.asset_finder.lookup_future_symbol('OMH15')
self.assertEqual(om.sid, 2468)
self.assertEqual(om.symbol, 'OMH15')
self.assertEqual(om.root_symbol, 'OM')
self.assertEqual(om.notice_date, pd.Timestamp('2014-01-20', tz='UTC'))
self.assertEqual(om.expiration_date,
pd.Timestamp('2014-02-20', tz='UTC'))
self.assertEqual(om.auto_close_date,
pd.Timestamp('2014-01-18', tz='UTC'))
cl = TestFuture.asset_finder.lookup_future_symbol('CLG06')
self.assertEqual(cl.sid, 0)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('#&?!')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('FOOBAR')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('XXX99')
class AssetFinderTestCase(WithTradingCalendars, ZiplineTestCase):
asset_finder_type = AssetFinder
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderTestCase, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = self.asset_finder_type(conn)
def test_blocked_lookup_symbol_query(self):
# we will try to query for more variables than sqlite supports
# to make sure we are properly chunking on the client side
as_of = pd.Timestamp('2013-01-01', tz='UTC')
# we need more sids than we can query from sqlite
nsids = SQLITE_MAX_VARIABLE_NUMBER + 10
sids = range(nsids)
frame = pd.DataFrame.from_records(
[
{
'sid': sid,
'symbol': 'TEST.%d' % sid,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for sid in sids
]
)
self.write_assets(equities=frame)
assets = self.asset_finder.retrieve_equities(sids)
assert_equal(viewkeys(assets), set(sids))
def test_lookup_symbol_delimited(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'TEST.%d' % i,
'company_name': "company%d" % i,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for i in range(3)
]
)
self.write_assets(equities=frame)
finder = self.asset_finder
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
# we do it twice to catch caching bugs
for i in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST1', as_of)
# '@' is not a supported delimiter
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST@1', as_of)
# Adding an unnecessary fuzzy shouldn't matter.
for fuzzy_char in ['-', '/', '_', '.']:
self.assertEqual(
asset_1,
finder.lookup_symbol('TEST%s1' % fuzzy_char, as_of)
)
def test_lookup_symbol_fuzzy(self):
metadata = pd.DataFrame.from_records([
{'symbol': 'PRTY_HRD', 'exchange': "TEST"},
{'symbol': 'BRKA', 'exchange': "TEST"},
{'symbol': 'BRK_A', 'exchange': "TEST"},
])
self.write_assets(equities=metadata)
finder = self.asset_finder
dt = pd.Timestamp('2013-01-01', tz='UTC')
# Try combos of looking up PRTYHRD with and without a time or fuzzy
# Both non-fuzzys get no result
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', None)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', dt)
# Both fuzzys work
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', dt, fuzzy=True))
# Try combos of looking up PRTY_HRD, all returning sid 0
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt, fuzzy=True))
# Try combos of looking up BRKA, all returning sid 1
self.assertEqual(1, finder.lookup_symbol('BRKA', None))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt))
self.assertEqual(1, finder.lookup_symbol('BRKA', None, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt, fuzzy=True))
# Try combos of looking up BRK_A, all returning sid 2
self.assertEqual(2, finder.lookup_symbol('BRK_A', None))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt, fuzzy=True))
def test_lookup_symbol_change_ticker(self):
T = partial(pd.Timestamp, tz='utc')
metadata = pd.DataFrame.from_records(
[
# sid 0
{
'symbol': 'A',
'asset_name': 'Asset A',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'B',
'asset_name': 'Asset B',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
# sid 1
{
'symbol': 'C',
'asset_name': 'Asset C',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'A', # claiming the unused symbol 'A'
'asset_name': 'Asset A',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
],
index=[0, 0, 1, 1],
)
self.write_assets(equities=metadata)
finder = self.asset_finder
# note: these assertions walk forward in time, starting at assertions
# about ownership before the start_date and ending with assertions
# after the end_date; new assertions should be inserted in the correct
# locations
# no one held 'A' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('A', T('2013-12-31'))
# no one held 'C' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('C', T('2013-12-31'))
for asof in pd.date_range('2014-01-01', '2014-01-05', tz='utc'):
# from 01 through 05 sid 0 held 'A'
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(0),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(A_result.symbol, 'B')
assert_equal(A_result.asset_name, 'Asset B')
# from 01 through 05 sid 1 held 'C'
C_result = finder.lookup_symbol('C', asof)
assert_equal(
C_result,
finder.retrieve_asset(1),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(C_result.symbol, 'A')
assert_equal(C_result.asset_name, 'Asset A')
# no one held 'B' before 06
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('B', T('2014-01-05'))
# no one held 'C' after 06, however, no one has claimed it yet
# so it still maps to sid 1
assert_equal(
finder.lookup_symbol('C', T('2014-01-07')),
finder.retrieve_asset(1),
)
for asof in pd.date_range('2014-01-06', '2014-01-11', tz='utc'):
# from 06 through 10 sid 0 held 'B'
# we test through the 11th because sid 1 is the last to hold 'B'
# so it should ffill
B_result = finder.lookup_symbol('B', asof)
assert_equal(
B_result,
finder.retrieve_asset(0),
msg=str(asof),
)
assert_equal(B_result.symbol, 'B')
assert_equal(B_result.asset_name, 'Asset B')
# from 06 through 10 sid 1 held 'A'
# we test through the 11th because sid 1 is the last to hold 'A'
# so it should ffill
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(1),
msg=str(asof),
)
assert_equal(A_result.symbol, 'A')
assert_equal(A_result.asset_name, 'Asset A')
def test_lookup_symbol(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'existing',
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
self.write_assets(equities=df)
finder = self.asset_finder
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('NON_EXISTING', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol('EXISTING', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol('EXISTING', date)
self.assertEqual(result.symbol, 'EXISTING')
self.assertEqual(result.sid, i)
def test_fail_to_write_overlapping_data(self):
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later end date.
{
'sid': 2,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2013-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later start_date
{
'sid': 3,
'symbol': 'multiple',
'start_date': pd.Timestamp('2011-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
]
)
with self.assertRaises(ValueError) as e:
self.write_assets(equities=df)
self.assertEqual(
str(e.exception),
"Ambiguous ownership for 1 symbol, multiple assets held the"
" following symbols:\n"
"MULTIPLE:\n"
" intersections: (('2010-01-01 00:00:00', '2012-01-01 00:00:00'),"
" ('2011-01-01 00:00:00', '2012-01-01 00:00:00'))\n"
" start_date end_date\n"
" sid \n"
" 1 2010-01-01 2012-01-01\n"
" 2 2010-01-01 2013-01-01\n"
" 3 2011-01-01 2012-01-01"
)
def test_lookup_generic(self):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
with build_lookup_generic_cases(self.asset_finder_type) as cases:
for finder, symbols, reference_date, expected in cases:
results, missing = finder.lookup_generic(symbols,
reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_none_raises(self):
"""
If lookup_symbol is vectorized across multiple symbols, and one of them
is None, want to raise a TypeError.
"""
with self.assertRaises(TypeError):
self.asset_finder.lookup_symbol(None, pd.Timestamp('2013-01-01'))
def test_lookup_mult_are_one(self):
"""
Ensure that multiple symbols that return the same sid are collapsed to
a single returned asset.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': symbol,
'start_date': date.value,
'end_date': (date + timedelta(days=30)).value,
'exchange': 'NYSE',
}
for symbol in ('FOOB', 'FOO_B')
]
)
self.write_assets(equities=df)
finder = self.asset_finder
# If we are able to resolve this with any result, means that we did not
# raise a MultipleSymbolError.
result = finder.lookup_symbol('FOO/B', date + timedelta(1), fuzzy=True)
self.assertEqual(result.sid, 1)
def test_endless_multiple_resolves(self):
"""
Situation:
1. Asset 1 w/ symbol FOOB changes to FOO_B, and then is delisted.
2. Asset 2 is listed with symbol FOO_B.
If someone asks for FOO_B with fuzzy matching after 2 has been listed,
they should be able to correctly get 2.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'FOOB',
'start_date': date.value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
{
'sid': 1,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=31)).value,
'end_date': (date + timedelta(days=60)).value,
'exchange': 'NYSE',
},
{
'sid': 2,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=61)).value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
]
)
self.write_assets(equities=df)
finder = self.asset_finder
# If we are able to resolve this with any result, means that we did not
# raise a MultipleSymbolError.
result = finder.lookup_symbol(
'FOO/B',
date + timedelta(days=90),
fuzzy=True
)
self.assertEqual(result.sid, 2)
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'also_real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'symbol': 'real_but_old',
'start_date': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose start_date is **after** our query date. We should
# **not** find it.
{
'sid': 3,
'symbol': 'real_but_in_the_future',
'start_date': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
self.write_assets(equities=data)
finder = self.asset_finder
results, missing = finder.lookup_generic(
['REAL', 1, 'FAKE', 'REAL_BUT_OLD', 'REAL_BUT_IN_THE_FUTURE'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'REAL')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'ALSO_REAL')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'REAL_BUT_OLD')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'FAKE')
self.assertEqual(missing[1], 'REAL_BUT_IN_THE_FUTURE')
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end,
exchange="TEST")
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = self.asset_finder
asset1 = Equity(1, symbol="AAPL", exchange="TEST")
asset2 = Equity(2, symbol="GOOG", exchange="TEST")
asset200 = Future(200, symbol="CLK15", exchange="TEST")
asset201 = Future(201, symbol="CLM15", exchange="TEST")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
def test_compute_lifetimes(self):
num_assets = 4
trading_day = self.trading_calendar.day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_equity_info(
num_assets=num_assets,
first_start=first_start,
frequency=trading_day,
periods_between_starts=3,
asset_lifetime=5
)
self.write_assets(equities=frame)
finder = self.asset_finder
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_with_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
expected_no_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
# This way of doing the checks is redundant, but very
# clear.
if start <= date <= end:
expected_with_start_raw[i, j] = True
if start < date:
expected_no_start_raw[i, j] = True
expected_with_start = pd.DataFrame(
data=expected_with_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=True)
assert_frame_equal(result, expected_with_start)
expected_no_start = pd.DataFrame(
data=expected_no_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=False)
assert_frame_equal(result, expected_no_start)
def test_sids(self):
# Ensure that the sids property of the AssetFinder is functioning
self.write_assets(equities=make_simple_equity_info(
[0, 1, 2],
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
))
self.assertEqual({0, 1, 2}, set(self.asset_finder.sids))
def test_lookup_by_supplementary_field(self):
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'A',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'B',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'C',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
]
)
equity_supplementary_mappings = pd.DataFrame.from_records(
[
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2013-6-28', tz='UTC'),
},
{
'sid': 1,
'field': 'ALT_ID',
'value': '100000001',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000002',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 2,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
]
)
self.write_assets(
equities=equities,
equity_supplementary_mappings=equity_supplementary_mappings,
)
af = self.asset_finder
# Before sid 0 has changed ALT_ID.
dt = pd.Timestamp('2013-6-28', tz='UTC')
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '100000000', dt)
self.assertEqual(asset_0.sid, 0)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '100000001', dt)
self.assertEqual(asset_1.sid, 1)
# We don't know about this ALT_ID yet.
with self.assertRaisesRegexp(
ValueNotFoundForField,
"Value '{}' was not found for field '{}'.".format(
'100000002',
'ALT_ID',
)
):
af.lookup_by_supplementary_field('ALT_ID', '100000002', dt)
# After all assets have ended.
dt = pd.Timestamp('2014-01-02', tz='UTC')
asset_2 = af.lookup_by_supplementary_field('ALT_ID', '100000000', dt)
self.assertEqual(asset_2.sid, 2)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '100000001', dt)
self.assertEqual(asset_1.sid, 1)
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '100000002', dt)
self.assertEqual(asset_0.sid, 0)
# At this point both sids 0 and 2 have held this value, so an
# as_of_date is required.
expected_in_repr = (
"Multiple occurrences of the value '{}' found for field '{}'."
).format('100000000', 'ALT_ID')
with self.assertRaisesRegexp(
MultipleValuesFoundForField,
expected_in_repr,
):
af.lookup_by_supplementary_field('ALT_ID', '100000000', None)
def test_get_supplementary_field(self):
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'A',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'B',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'C',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
]
)
equity_supplementary_mappings = pd.DataFrame.from_records(
[
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2013-6-28', tz='UTC'),
},
{
'sid': 1,
'field': 'ALT_ID',
'value': '100000001',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000002',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
from numpy.core.fromnumeric import var
import pytest
import pandas as pd
import numpy as np
from dowhy import CausalModel
class TestIDIdentifier(object):
def test_1(self):
treatment = "T"
outcome = "Y"
causal_graph = "digraph{T->Y;}"
columns = list(treatment) + list(outcome)
df = pd.DataFrame(columns=columns)
# Identify the causal effect using the ID algorithm.
causal_model = CausalModel(df, treatment, outcome, graph=causal_graph)
identified_estimand = causal_model.identify_effect(method_name="id-algorithm")
# Only P(Y|T) should be present for test to succeed.
identified_str = identified_estimand.__str__()
gt_str = "Predictor: P(Y|T)"
assert identified_str == gt_str
def test_2(self):
'''
Test undirected edge between treatment and outcome.
'''
treatment = "T"
outcome = "Y"
causal_graph = "digraph{T->Y; Y->T;}"
columns = list(treatment) + list(outcome)
df = pd.DataFrame(columns=columns)
# Attempt to identify the causal effect using the ID algorithm.
causal_model = CausalModel(df, treatment, outcome, graph=causal_graph)
# Since undirected graph, identify effect must throw an error.
with pytest.raises(Exception):
identified_estimand = causal_model.identify_effect(method_name="id-algorithm")
def test_3(self):
treatment = "T"
outcome = "Y"
variables = ["X1"]
causal_graph = "digraph{T->X1;X1->Y;}"
columns = list(treatment) + list(outcome) + list(variables)
df = pd.DataFrame(columns=columns)
# -*- coding: utf-8 -*-
"""
This enables to parameterize a desired scenario to mock a multi-partner ML project.
"""
import datetime
import re
import uuid
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from loguru import logger
from sklearn.preprocessing import LabelEncoder
from . import contributivity, constants
from . import dataset as dataset_module
from .corruption import Corruption, NoCorruption, IMPLEMENTED_CORRUPTION, Duplication
from .mpl_utils import AGGREGATORS
from .multi_partner_learning import MULTI_PARTNER_LEARNING_APPROACHES
from .partner import Partner
from .splitter import Splitter, IMPLEMENTED_SPLITTERS
class Scenario:
def __init__(
self,
partners_count,
amounts_per_partner,
dataset=None,
dataset_name=constants.MNIST,
dataset_proportion=1,
samples_split_option='random',
corruption_parameters=None,
init_model_from="random_initialization",
multi_partner_learning_approach="fedavg",
aggregation_weighting="data-volume",
gradient_updates_per_pass_count=constants.DEFAULT_GRADIENT_UPDATES_PER_PASS_COUNT,
minibatch_count=constants.DEFAULT_BATCH_COUNT,
epoch_count=constants.DEFAULT_EPOCH_COUNT,
is_early_stopping=True,
contributivity_methods=None,
is_quick_demo=False,
save_path=None,
scenario_id=1,
val_set='global',
test_set='global',
**kwargs,
):
"""
:param partners_count: int, number of partners. Example: partners_count = 3
:param amounts_per_partner: [float]. Fractions of the
original dataset each partner receives to mock a collaborative ML scenario where each partner provides data
for the ML training.
:param dataset: dataset.Dataset object. Use it if you want to use your own dataset, otherwise use dataset_name.
:param dataset_name: str. 'mnist', 'cifar10', 'esc50' and 'titanic' are currently supported (default: mnist)
:param dataset_proportion: float (default: 1)
:param samples_split_option: Splitter object, or its string identifier (for instance 'random', or 'stratified')
Define the strategy to use to split the data samples between the partners.
Default, RandomSplitter.
:param corruption_parameters: list of Corruption object, or its string identifier, one ofr each partner.
Enable to artificially corrupt partner's data.
For instance: [Permutation(proportion=0.2), 'random', 'not-corrupted']
:param init_model_from: None (default) or path
:param multi_partner_learning_approach: 'fedavg' (default), 'seq-pure', 'seq-with-final-agg' or 'seqavg'
Define the multi-partner learning approach
:param aggregation_weighting: 'data_volume' (default), 'uniform' or 'local_score'
:param gradient_updates_per_pass_count: int
:param minibatch_count: int
:param epoch_count: int
:param is_early_stopping: boolean. Stop the training if scores on val_set reach a plateau
:param contributivity_methods: A declarative list `[]` of the contributivity measurement methods to be executed.
:param is_quick_demo: boolean. Useful for debugging
:param save_path: path where to save the scenario outputs. By default, they are not saved!
:param scenario_id: str
:param **kwargs:
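Illustrative example (a minimal sketch, parameter values are arbitrary)::
scenario = Scenario(partners_count=3, amounts_per_partner=[0.4, 0.3, 0.3],
dataset_name='mnist', samples_split_option='random',
multi_partner_learning_approach='fedavg', epoch_count=5, minibatch_count=2)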
"""
# ---------------------------------------------------------------------
# Initialization of the dataset defined in the config of the experiment
# ---------------------------------------------------------------------
# Raise Exception if unknown parameters in the config of the scenario
params_known = [
"dataset",
"dataset_name",
"dataset_proportion",
"val_set",
"test_set"
] # Dataset related
params_known += [
"contributivity_methods",
"multi_partner_learning_approach",
"aggregation_weighting",
] # federated learning related
params_known += [
"partners_count",
"amounts_per_partner",
"corruption_parameters",
"samples_split_option",
"samples_split_configuration"
] # Partners related
params_known += [
"gradient_updates_per_pass_count",
"epoch_count",
"minibatch_count",
"is_early_stopping",
] # Computation related
params_known += ["init_model_from"] # Model related
params_known += ["is_quick_demo"]
params_known += ["save_path",
"scenario_name",
"repeat_count"]
unrecognised_parameters = [x for x in kwargs.keys() if (x not in params_known and not x.startswith('mpl_'))]
if len(unrecognised_parameters) > 0:
for x in unrecognised_parameters:
logger.debug(f"Unrecognised parameter: {x}")
raise Exception(
f"Unrecognised parameters {unrecognised_parameters}, check your configuration"
)
# Get and verify which dataset is configured
if isinstance(dataset, dataset_module.Dataset):
self.dataset = dataset
else:
# Reference the module corresponding to the dataset selected and initialize the Dataset object
if dataset_name == constants.MNIST: # default
self.dataset = dataset_module.Mnist()
elif dataset_name == constants.CIFAR10:
self.dataset = dataset_module.Cifar10()
elif dataset_name == constants.TITANIC:
self.dataset = dataset_module.Titanic()
elif dataset_name == constants.ESC50:
self.dataset = dataset_module.Esc50()
elif dataset_name == constants.IMDB:
self.dataset = dataset_module.Imdb()
else:
raise Exception(
f"Dataset named '{dataset_name}' is not supported (yet). You can construct your own "
f"dataset object, or even add it by contributing to the project !"
)
logger.debug(f"Dataset selected: {self.dataset.name}")
# Proportion of the dataset the computation will used
self.dataset_proportion = dataset_proportion
assert (
self.dataset_proportion > 0
), "Error in the config file, dataset_proportion should be > 0"
assert (
self.dataset_proportion <= 1
), "Error in the config file, dataset_proportion should be <= 1"
if self.dataset_proportion < 1:
self.dataset.shorten_dataset_proportion(self.dataset_proportion)
else:
logger.debug("The full dataset will be used (dataset_proportion is configured to 1)")
logger.debug(
f"Computation uses the full dataset for scenario #{scenario_id}"
)
# --------------------------------------
# Definition of collaborative scenarios
# --------------------------------------
# Partners mock different partners in a collaborative data science project
self.partners_list = [] # List of all partners defined in the scenario
self.partners_count = partners_count # Number of partners in the scenario
# For configuring the respective sizes of the partners' datasets
# (% of samples of the dataset for each partner, ...
# ... has to sum to 1, and number of items has to equal partners_count)
self.amounts_per_partner = amounts_per_partner
# Use a tolerance-based comparison: an exact float check would spuriously reject e.g. [0.3, 0.3, 0.4].
if not np.isclose(np.sum(self.amounts_per_partner), 1):
raise ValueError("The sum of the amount per partners you provided isn't equal to 1")
if len(self.amounts_per_partner) != self.partners_count:
raise AttributeError(f"The amounts_per_partner list should have a size ({len(self.amounts_per_partner)}) "
f"equals to partners_count ({self.partners_count})")
# To configure how validation set and test set will be organized.
if test_set in ['local', 'global']:
self.test_set = test_set
else:
raise ValueError(f'Test set can be \'local\' or \'global\' not {test_set}')
if val_set in ['local', 'global']:
self.val_set = val_set
else:
raise ValueError(f'Validation set can be \'local\' or \'global\' not {val_set}')
# To configure if data samples are split between partners randomly or in a stratified way...
# ... so that they cover distinct areas of the samples space
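# e.g. samples_split_option='stratified', or a pre-built Splitter instance such as
# RandomSplitter(amounts_per_partner=[0.4, 0.3, 0.3]) (illustrative values only; see
# IMPLEMENTED_SPLITTERS for the identifiers and constructors actually available).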
if isinstance(samples_split_option, Splitter):
if self.val_set != samples_split_option.val_set:
logger.warning('The validation set organisation (local/global) is differently configured between the '
'provided Splitter and Scenario')
if self.test_set != samples_split_option.test_set:
logger.warning('The test set organisation (local/global) is differently configured between the '
'provided Splitter and Scenario')
self.splitter = samples_split_option
else:
splitter_param = {'amounts_per_partner': self.amounts_per_partner,
'val_set': self.val_set,
'test_set': self.test_set,
}
if "samples_split_configuration" in kwargs.keys():
splitter_param.update({'configuration': kwargs["samples_split_configuration"]})
self.splitter = IMPLEMENTED_SPLITTERS[samples_split_option](**splitter_param)
# To configure if the data of the partners are corrupted or not (useful for testing contributivity measures)
if corruption_parameters:
self.corruption_parameters = list(
map(lambda x: x if isinstance(x, Corruption) else IMPLEMENTED_CORRUPTION[x](),
corruption_parameters))
else:
self.corruption_parameters = [NoCorruption() for _ in range(self.partners_count)] # default
# ---------------------------------------------------
# Configuration of the distributed learning approach
# ---------------------------------------------------
self.mpl = None
# Multi-partner learning approach
self.multi_partner_learning_approach = multi_partner_learning_approach
try:
self._multi_partner_learning_approach = MULTI_PARTNER_LEARNING_APPROACHES[
multi_partner_learning_approach]
except KeyError:
text_error = f"Multi-partner learning approach '{multi_partner_learning_approach}' is not a valid "
text_error += "approach. List of supported approach : "
for key in MULTI_PARTNER_LEARNING_APPROACHES.keys():
text_error += f"{key}, "
raise KeyError(text_error)
# Define how federated learning aggregation steps are weighted...
# ... e.g. 'data-volume' (the signature default) or 'uniform'
self.aggregation_weighting = aggregation_weighting
try:
self._aggregation_weighting = AGGREGATORS[aggregation_weighting]
except KeyError:
raise ValueError(f"aggregation approach '{aggregation_weighting}' is not a valid approach. ")
# Number of epochs, mini-batches and gradient updates per pass in ML training
self.epoch_count = epoch_count
assert (
self.epoch_count > 0
), "Error: in the provided config file, epoch_count should be > 0"
self.minibatch_count = minibatch_count
assert (
self.minibatch_count > 0
), "Error: in the provided config file, minibatch_count should be > 0"
self.gradient_updates_per_pass_count = gradient_updates_per_pass_count
assert self.gradient_updates_per_pass_count > 0, (
"Error: in the provided config file, "
"gradient_updates_per_pass_count should be > 0 "
)
# Early stopping stops ML training when performance increase is not significant anymore
# It is used to optimize the number of epochs and the execution time
self.is_early_stopping = is_early_stopping
# Model used to initialise model
self.init_model_from = init_model_from
if init_model_from == "random_initialization":
self.use_saved_weights = False
else:
self.use_saved_weights = True
# -----------------------------------------------------------------
# Configuration of contributivity measurement methods to be tested
# -----------------------------------------------------------------
# List of contributivity measures selected and computed in the scenario
self.contributivity_list = []
# Contributivity methods
self.contributivity_methods = []
if contributivity_methods is not None:
for method in contributivity_methods:
if method in constants.CONTRIBUTIVITY_METHODS:
self.contributivity_methods.append(method)
else:
raise Exception(f"Contributivity method '{method}' is not in contributivity_methods list.")
# -------------
# Miscellaneous
# -------------
# Misc.
self.scenario_id = scenario_id
self.repeat_count = kwargs.get('repeat_count', 1)
# The quick demo parameters overwrites previously defined parameters to make the scenario faster to compute
self.is_quick_demo = is_quick_demo
if self.is_quick_demo and self.dataset_proportion < 1:
raise Exception("Don't start a quick_demo without the full dataset")
if self.is_quick_demo:
# Use less data and/or less epochs to speed up the computations
if len(self.dataset.x_train) > constants.TRAIN_SET_MAX_SIZE_QUICK_DEMO:
index_train = np.random.choice(
self.dataset.x_train.shape[0],
constants.TRAIN_SET_MAX_SIZE_QUICK_DEMO,
replace=False,
)
index_val = np.random.choice(
self.dataset.x_val.shape[0],
constants.VAL_SET_MAX_SIZE_QUICK_DEMO,
replace=False,
)
index_test = np.random.choice(
self.dataset.x_test.shape[0],
constants.TEST_SET_MAX_SIZE_QUICK_DEMO,
replace=False,
)
self.dataset.x_train = self.dataset.x_train[index_train]
self.dataset.y_train = self.dataset.y_train[index_train]
self.dataset.x_val = self.dataset.x_val[index_val]
self.dataset.y_val = self.dataset.y_val[index_val]
self.dataset.x_test = self.dataset.x_test[index_test]
self.dataset.y_test = self.dataset.y_test[index_test]
self.epoch_count = 3
self.minibatch_count = 2
# -----------------
# Output parameters
# -----------------
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d_%Hh%M")
self.scenario_name = kwargs.get('scenario_name',
f"scenario_{self.scenario_id}_repeat_{self.repeat_count}_{now_str}_"
f"{uuid.uuid4().hex[:3]}") # to distinguish identical names
if re.search(r'\s', self.scenario_name):
raise ValueError(
f'The scenario name "{self.scenario_name}" cannot contain space characters; please use '
f'underscores or dashes.')
self.short_scenario_name = f"{self.partners_count}_{self.amounts_per_partner}"
if save_path is not None:
self.save_folder = Path(save_path) / self.scenario_name
else:
self.save_folder = None
# -------------------------------------------------------------------
# Select from the kwargs the parameters to be transferred to sub-objects
# -------------------------------------------------------------------
self.mpl_kwargs = {}
for key, value in kwargs.items():
if key.startswith('mpl_'):
self.mpl_kwargs[key.replace('mpl_', '')] = value
# -----------------------
# Provision the scenario
# -----------------------
self.instantiate_scenario_partners()
self.split_data()
self.compute_batch_sizes()
self.apply_data_alteration_configuration()
# ------------------------------------------------
# Print the description of the scenario configured
# ------------------------------------------------
self.log_scenario_description()
@property
def nb_samples_used(self):
if len(self.partners_list) == 0:
return len(self.dataset.x_train)
else:
return sum([p.final_nb_samples for p in self.partners_list])
@property
def final_relative_nb_samples(self):
return [p.final_nb_samples / self.nb_samples_used for p in self.partners_list]
def copy(self, **kwargs):
params = self.__dict__.copy()
for key in ['partners_list',
'mpl',
'_multi_partner_learning_approach',
'_aggregation_weighting',
'use_saved_weights',
'contributivity_list',
'scenario_name',
'short_scenario_name',
'save_folder',
'splitter']:
del params[key]
if 'is_quick_demo' in kwargs and kwargs['is_quick_demo'] != self.is_quick_demo:
raise ValueError("Attribute 'is_quick_demo' cannot be modified between copies.")
if self.save_folder is not None:
params['save_path'] = self.save_folder.parents[0]
else:
params['save_path'] = None
params['samples_split_option'] = self.splitter.copy()
params.update(kwargs)
return Scenario(**params)
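# Hedged usage sketch (not part of the original module): `copy` rebuilds a
# Scenario from the current parameters with selected overrides, dropping the
# derived attributes deleted above so they get recomputed. The override
# values below are illustrative only.
#
#     base = Scenario(...)                              # a configured scenario
#     faster = base.copy(epoch_count=2, minibatch_count=1)
#     # partners_list, save_folder, contributivity_list, ... are rebuilt;
#     # everything else is carried over from `base`.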
def log_scenario_description(self):
"""Log the description of the scenario configured"""
# Describe scenario
logger.info("Description of data scenario configured:")
logger.info(f" Number of partners defined: {self.partners_count}")
logger.info(f" Data distribution scenario chosen: {self.splitter}")
logger.info(f" Multi-partner learning approach: {self.multi_partner_learning_approach}")
logger.info(f" Weighting option: {self.aggregation_weighting}")
logger.info(f" Iterations parameters: "
f"{self.epoch_count} epochs > "
f"{self.minibatch_count} mini-batches > "
f"{self.gradient_updates_per_pass_count} gradient updates per pass")
# Describe data
logger.info(f"Data loaded: {self.dataset.name}")
if self.is_quick_demo:
logger.info(" Quick demo configuration: number of data samples and epochs "
"are limited to speed up the run")
logger.info(
f" {len(self.dataset.x_train)} train data with {len(self.dataset.y_train)} labels"
)
logger.info(
f" {len(self.dataset.x_val)} val data with {len(self.dataset.y_val)} labels"
)
logger.info(
f" {len(self.dataset.x_test)} test data with {len(self.dataset.y_test)} labels"
)
def append_contributivity(self, contributivity_method):
self.contributivity_list.append(contributivity_method)
def instantiate_scenario_partners(self):
"""Create the partners_list"""
if len(self.partners_list) > 0:
raise Exception('Partners have already been initialized')
self.partners_list = [Partner(i, corruption=self.corruption_parameters[i]) for i in range(self.partners_count)]
def split_data(self):
self.splitter.split(self.partners_list, self.dataset)
return 0
def plot_data_distribution(self):
lb = LabelEncoder().fit([str(y) for y in self.dataset.y_train])
for i, partner in enumerate(self.partners_list):
plt.subplot(self.partners_count, 1, i + 1) # TODO share y axis
data_count = np.bincount(lb.transform([str(y) for y in partner.y_train]))
# Fill with 0
while len(data_count) < self.dataset.num_classes:
data_count = np.append(data_count, 0)
plt.bar(np.arange(0, self.dataset.num_classes), data_count)
plt.ylabel("partner " + str(partner.id))
plt.suptitle("Data distribution")
plt.xlabel("Digits")
(self.save_folder / 'graphs').mkdir(exist_ok=True)
plt.savefig(self.save_folder / "graphs" / "data_distribution.png")
plt.close()
def compute_batch_sizes(self):
# For each partner we compute the batch size in multi-partner and single-partner setups
batch_size_min = 1
batch_size_max = constants.MAX_BATCH_SIZE
if self.partners_count == 1:
p = self.partners_list[0]
batch_size = int(len(p.x_train) / self.gradient_updates_per_pass_count)
p.batch_size = np.clip(batch_size, batch_size_min, batch_size_max)
else:
for p in self.partners_list:
batch_size = int(
len(p.x_train)
/ (self.minibatch_count * self.gradient_updates_per_pass_count)
)
p.batch_size = np.clip(batch_size, batch_size_min, batch_size_max)
for p in self.partners_list:
logger.debug(f" Compute batch sizes, partner #{p.id}: {p.batch_size}")
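# Worked example (illustrative numbers, not taken from the original code):
# in a multi-partner setup with len(p.x_train) == 6000, minibatch_count == 3
# and gradient_updates_per_pass_count == 8, the computed batch size is
# int(6000 / (3 * 8)) == 250, then clipped into [1, constants.MAX_BATCH_SIZE].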
def apply_data_alteration_configuration(self):
"""perform corruption on partner if needed"""
for partner in self.partners_list:
if isinstance(partner.corruption, Duplication):
if not partner.corruption.duplicated_partner_id:
data_volume = np.array([p.data_volume for p in self.partners_list if p.id != partner.id])
ids = np.array([p.id for p in self.partners_list if p.id != partner.id])
candidates = ids[data_volume >= partner.data_volume * partner.corruption.proportion]
partner.corruption.duplicated_partner_id = np.random.choice(candidates)
partner.corruption.set_duplicated_partner(self.partners_list)
partner.corrupt()
def to_dataframe(self):
df = pd.DataFrame()
"""Wraps sklearn Gradient Boosting Regressor to
1) automate modeling similar to gbm library in R
2) overlay data and descriptive statistics in data visualization
of partial dependencies for better inference
author: <NAME>
date created: 2018-06-15
"""
import sklearn_gbm_ots.sklearn_gbm_extend as sklearn_gbm_extend
import pandas as pd
import sklearn.model_selection as skl_ms
import sklearn.ensemble as skl_e
import sklearn.metrics as skl_metrics
import numpy as np
import sklearn_gbm_ots.pandas_tools as pde
import logging
import sklearn_gbm_ots.file_tools as fs
class GBMwrapper():
def __init__(
self,
df_dataset,
outcome,
weights_col_name=None,
features_list=None,
impute_NAs=True,
tail_threshold=10,
destination_dir='./gbm_output/',
ax_limits_per_feature=None,
show_plots=True,
random_state=None):
"""Creates gbm object for future modeling.
arguments:
df_dataset - dataframe with all data:
numeric/ categorical features, and the outcome
outcome - column name with the outcome
features_list - list of strings representing features columns.
by default all columns will be used except the outcome
impute_NAs - flag to fill NAs with median values
tail_threshold - minimum number of observations per given category.
Categories with smaller counts will be merged to 'Other'
destination_dir - directory where output files will be saved
ax_limits_per_feature - dictionary for customizing plots axes:
{
'feature_name1': {'xlim': [0, 100], 'ylim': [0, 1]},
'feature_name2': {'ylim': [0, 0.5]},
'feature_name5': {'xlim': [-1, 1]},
}
"""
self.random_state = random_state
self.show_plots = show_plots
self.destination_dir = destination_dir
fs.prepare_folder_for(self.destination_dir + 'temp.txt')
self.df_dataset = df_dataset
self.features_list = features_list
self.outcome = outcome
self.impute_NAs = impute_NAs
self.tail_threshold = tail_threshold
if self.features_list is not None:
self.df_dataset = self.df_dataset[
self.features_list + [self.outcome]]
self.drop_na_outcomes()
if weights_col_name is not None:
self.weights = self.df_dataset[weights_col_name]
self.df_dataset = self.df_dataset.drop(
[weights_col_name], axis=1)
else:
self.weights = pd.Series(np.ones(self.df_dataset.shape[0]))
self.ax_limits_per_feature = ax_limits_per_feature
self.default_params = {
'n_estimators': 4000,
'max_depth': 10,
'max_features': 4,
'min_samples_split': 20,
'min_samples_leaf': 10,
'learning_rate': 0.01,
'loss': 'ls'}
self.get_categorical_features()
if self.tail_threshold is not None:
self.remove_long_tails()
self.validate_data()
self.prepare_data()
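# Hedged usage sketch (not part of the original module). The file and column
# names are hypothetical; only the GBMwrapper API used below comes from this
# class.
#
#     import pandas as pd
#     df_houses = pd.read_csv('houses.csv')        # hypothetical dataset
#     gbm = GBMwrapper(df_houses, outcome='price',
#                      destination_dir='./gbm_output/', random_state=0)
#     model = gbm.build_model(cv_n_splits=5)       # CV-tunes n_estimators
#     gbm.predict_dataset(df_houses.drop(columns=['price']))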
def remove_long_tails(self):
"""Merges Categories with counts smaller than tail_threshold
to 'Other'"""
for col in self.categorical_features:
v_counts1 = self.df_dataset[col].value_counts()
removed = pde.remove_long_tail(
self.df_dataset,
col,
self.tail_threshold,
'Other')
v_counts2 = self.df_dataset[col].value_counts()
if removed:
logging.info('removed long tail from {}'.format(col))
logging.info('{} before: {}'.format(col, v_counts1))
logging.info('{} after: {}'.format(col, v_counts2))
def get_categorical_features(self):
# Identifies categorical features in the dataset
self.categorical_features = list(
self.df_dataset.dtypes[self.df_dataset.dtypes == 'object'].index)
logging.info('detected categorical_features: {}'.format(
self.categorical_features))
def drop_na_outcomes(self):
# Removes observations with unknown outcome
obs_n1 = self.df_dataset.shape[0]
self.df_dataset = self.df_dataset.dropna(subset = [self.outcome])
obs_n2 = self.df_dataset.shape[0]
if obs_n2 < obs_n1:
logging.warning('dropped {} observations without outcome'.format(
obs_n1 - obs_n2))
def impute_NAs_in_col(self, col):
# Imputes NAs (only with median at the moment)
pde.fill_na_median(self.df_dataset, col)
def validate_data(self):
# Ensures data does not have NAs
na_errors = []
for col in self.df_dataset.columns:
if col not in self.categorical_features:
na_count = sum(pd.isnull(self.df_dataset[col]))
if na_count > 0:
if self.impute_NAs:
self.impute_NAs_in_col(col)
logging.warning('imputed {} NAs in {}'.format(
na_count, col))
else:
na_errors.append(
'Error: column "{}" has {} NA values'.format(
col, na_count))
for na_error in na_errors:
print(na_error)
if len(na_errors) > 0:
raise NameError('Must not have NAs for non-categorical values')
def prepare_data(self):
""" Prepares data for model training:
one-hot encoding for categorical values
setting aside test dataset """
self.X = pd.get_dummies(self.df_dataset.drop([self.outcome], axis=1))
self.y = self.df_dataset[self.outcome]
(self.train_X, self.test_X, self.train_y, self.test_y,
self.train_weights, self.test_weights, self.df_train,
self.df_test) = skl_ms.train_test_split(
self.X, self.y, self.weights, self.df_dataset,
random_state=self.random_state)
self.gbm_tools = sklearn_gbm_extend.ToolsGBM(
self.categorical_features,
self.train_X,
self.train_y,
self.train_weights,
outcome_label=self.outcome,
destination_dir=self.destination_dir,
show_plots=self.show_plots,
random_state=self.random_state)
def update_params(self, params):
self.params = params
if 'random_state' not in self.params:
self.params['random_state'] = self.random_state
def build_model(self, params=None, cv_n_splits=5):
"""Builds model and stores it in the object:
tunes number of estimators (trees) with cross validation
evaluates performance
plots training curves, feature importance, partial dependences
arguments:
params - gbm parameters dictionary (default as specified at init)
cv_n_splits - number of splits for cross-validation (default 5)"""
if params is None:
params = self.default_params
self.update_params(params)
logging.info('Cross-validation parameter optimization started.')
val_scores, std_val_scores = self.gbm_tools.cv_estimate(
params, n_splits=cv_n_splits)
cv_n_trees, cv1se_n_trees = self.gbm_tools.cv_n_tree(
val_scores, std_val_scores)
vertical_lines = {}
selected_n_trees = cv_n_trees
if pd.notnull(cv1se_n_trees) and cv1se_n_trees != 0:
selected_n_trees = cv1se_n_trees
vertical_lines['selected cv min less 1 std at'] = cv1se_n_trees
vertical_lines['cv min at '] = cv_n_trees
else:
vertical_lines['selected cv min at '] = cv_n_trees
logging.info('minimum cv error at {} trees'.format(cv_n_trees))
logging.info('minimum cv error within 1 std at {} trees'.format(
cv1se_n_trees))
logging.info('selected n_trees: {}'.format(selected_n_trees))
logging.info('plotting all trees training curves')
self.gbm = skl_e.GradientBoostingRegressor(**self.params)
self.gbm_fit()
self.gbm_tools.plot_gbm_training(
self.gbm, self.test_X, self.test_y, cv_scores=val_scores,
cv_std_scores=std_val_scores,
fig_params={'figsize': (11, 11)},
vertical_lines=vertical_lines)
self.update_n_trees(selected_n_trees)
self.evaluate_performance(self.gbm)
self.plot_output()
return self.gbm
def gbm_fit(self):
"""fits gbm model"""
logging.info('gbm fitting...')
self.gbm.fit(
self.train_X, self.train_y,
sample_weight=self.train_weights)
def update_n_trees(self, new_n_trees):
"""updates number of estimators, then refits and reevalute performance
arguments:
new_n_trees - number of trees (estimators) to be set"""
logging.info('updating n trees with selected {}'.format(
new_n_trees))
self.gbm.n_estimators = new_n_trees
self.params['n_estimators'] = new_n_trees
self.gbm_fit()
self.evaluate_performance(self.gbm)
def plot_output(self, ax_limits=None, file_prefix='', **fig_params):
"""plots feature importance and partial dependencies plots
arguments:
ax_limits - dictionary for customizing plots axes:
{
'feature_name1': {'xlim': [0, 100], 'ylim': [0, 1]},
'feature_name2': {'ylim': [0, 0.5]},
'feature_name5': {'xlim': [-1, 1]}}
if not provided, limits are set based on data
**fig_params - keyword parameters to be passed to plots,
including standard pyplot.figure and:
absolute_yscale - absolute_scale (starts at zero) on y axis
absolute_yticks - flag to subtract mean from y axis labels
"""
if ax_limits is None:
ax_limits = self.ax_limits_per_feature
logging.info('plotting feature importances and partial dependences')
self.gbm_tools.file_prefix = file_prefix
self.gbm_tools.feature_importances_plot(
self.gbm, {'figsize': (11, 11)})
self.gbm_tools.partial_dependencies_plots(
self.gbm,
self.params,
ax_limits=ax_limits,
**fig_params)
def evaluate_performance(self, gbm):
# evaluate performance of the provided gbm model
logging.info('evaluating performance')
pred_test_y = gbm.predict(self.test_X)
mse = skl_metrics.mean_squared_error(
self.test_y, pred_test_y,
sample_weight=1 / self.test_weights)
vexp = skl_metrics.explained_variance_score(
self.test_y, pred_test_y, sample_weight=1 / self.test_weights)
print('rmse: {:.2f}, variance explained: {:.0f}%'.format(
np.sqrt(mse), vexp * 100.))
predicted_label = 'predicted_{}'.format(self.outcome)
self.df_test = self.df_test.copy()
self.df_test[predicted_label] = pred_test_y
self.df_test['diff'] = self.df_test[predicted_label]\
- self.df_test[self.outcome]
self.df_test['abs diff'] = abs(self.df_test['diff'])
self.df_test[[predicted_label, self.outcome, 'diff']].hist()
print('{} standard deviation: {:.2f},\
prediction error std: {:.2f},\
average absolute error: {:.2f}'.format(
self.outcome,
np.std(self.df_test[self.outcome]),
np.std(self.df_test['diff']),
np.mean(self.df_test['abs diff'])))
def predict_dataset(self, df):
"""Adds column with predictions for provided data
arguments:
df - dataframe of the same format as training dataframe
but without the outcome"""
df[self.outcome + '_predicted'] = self.gbm.predict(pd.get_dummies(df))
#!/usr/bin/env python
from __future__ import print_function
from .tabulate import tabulate as tabulate_
import sys
import pandas as pd
import re
import datetime
def _get_version():
import ph._version
return ph._version.__version__
def print_version():
print(_get_version())
# Command line parsing of (1) --abc and (2) --abc=def
KWARG = re.compile("^--[a-z0-9_-]+$")
KWARG_WITH_VALUE = re.compile("^--[a-z0-9_-]+=")
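# Illustrative matches (a sketch, not executed anywhere in this module):
#     KWARG.match("--headers")           -> match object  (bare flag)
#     KWARG_WITH_VALUE.match("--sep=;")  -> match object  (flag with value)
#     KWARG.match("--sep=;")             -> None          (handled by the '=' form)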
USAGE_TEXT = """
ph is a command line tool for streaming csv data.
If you have a csv file `a.csv`, you can pipe it through `ph` on the
command line by using
$ cat a.csv | ph columns x y | ph eval "z = x**2 - y" | ph show
Use ph help [command] for help on the individual commands.
A list of available commands follows.
"""
COMMANDS = {}
DOCS = {}
def _gpx(fname):
try:
import gpxpy
except ImportError:
sys.exit("ph gpx needs gpxpy, pip install ph[gpx]")
def from_trackpoint(tp=None):
if tp is None:
return "time", "latitude", "longitude", "elevation", "distance"
p = tp.point
return str(p.time), p.latitude, p.longitude, p.elevation, tp.distance_from_start
with open(fname, "r") as fin:
gpx = gpxpy.parse(fin)
data = gpx.get_points_data()
columns = from_trackpoint()
dfdata = [from_trackpoint(tp) for tp in data]
return pd.DataFrame(dfdata, columns=columns)
def _tsv(*args, **kwargs):
kwargs["sep"] = "\t"
return pd.read_csv(*args, **kwargs)
# Some of these readers were introduced in later pandas versions, so they
# are registered defensively in the try/except blocks below.
READERS = {
"csv": pd.read_csv,
"clipboard": pd.read_clipboard,
"fwf": pd.read_fwf,
"json": pd.read_json,
"html": pd.read_html,
"tsv": _tsv,
"gpx": _gpx,
}
try:
READERS["excel"] = pd.read_excel
READERS["xls"] = pd.read_excel
READERS["odf"] = pd.read_excel
except AttributeError:
pass
try:
READERS["hdf5"] = pd.read_hdf
except AttributeError:
pass
try:
READERS["feather"] = pd.read_feather
except AttributeError:
pass
try:
READERS["parquet"] = pd.read_parquet
except AttributeError:
pass
try:
READERS["orc"] = pd.read_orc
except AttributeError:
pass
try:
READERS["msgpack"] = pd.read_msgpack
except AttributeError:
pass
try:
READERS["stata"] = pd.read_stata
except AttributeError:
pass
try:
READERS["sas"] = pd.read_sas
except AttributeError:
pass
try:
READERS["spss"] = pd.read_spss
except AttributeError:
pass
try:
READERS["pickle"] = pd.read_pickle
except AttributeError:
pass
try:
READERS["gbq"] = pd.read_gbq
except AttributeError:
pass
try:
READERS["google"] = pd.read_gbq
except AttributeError:
pass
try:
READERS["bigquery"] = pd.read_gbq
except AttributeError:
pass
WRITERS = {
"csv": "to_csv",
"fwf": "to_fwf",
"json": "to_json",
"html": "to_html",
"clipboard": "to_clipboard",
"xls": "to_excel",
"odf": "to_excel",
"hdf5": "to_hdf",
"feather": "to_feather",
"parquet": "to_parquet",
"orc": "to_orc",
"msgpack": "to_msgpack",
"stata": "to_stata",
"sas": "to_sas",
"spss": "to_spss",
"pickle": "to_pickle",
"gbq": "to_gbq",
"google": "to_gbq",
"bigquery": "to_gbq",
# extras
"tsv": "to_csv",
}
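# Dispatch sketch (illustrative; mirrors how `ph to <fmt>` resolves a writer
# in the `to` command below): the format name maps to a DataFrame method name
# that is fetched with getattr, e.g.
#     fn = getattr(df, WRITERS["csv"])   # -> df.to_csv
#     fn(index=False)
# Formats without a real pandas writer (e.g. fwf) are special-cased in `to`.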
FALSY = ("False", "false", "No", "no", "0", False, 0, "None")
TRUTHY = ("True", "true", "Yes", "yes", "1", True, 1)
def _assert_col(df, col, caller=None):
if col not in df.columns:
if caller is not None:
sys.exit("ph {}: Unknown column {}".format(caller, col))
sys.exit("Unknown column {}".format(col))
def _assert_cols(df, cols, caller=None):
for col in cols:
_assert_col(df, col, caller=caller)
def register(fn, name=None):
if name is None:
name = fn.__name__
COMMANDS[name] = fn
DOCS[name] = fn.__doc__
return fn
def registerx(name):
def inner(fn):
register(fn, name)
return fn
return inner
@register
def dataset(dset=None):
"""Load dataset as csv.
Usage: ph dataset linnerud | ph describe
"""
try:
import sklearn.datasets
except ImportError:
sys.exit("You need scikit-learn. Install ph[data].")
REALDATA = {
"olivetti_faces": sklearn.datasets.fetch_olivetti_faces,
"20newsgroups": sklearn.datasets.fetch_20newsgroups,
"20newsgroups_vectorized": sklearn.datasets.fetch_20newsgroups_vectorized,
"lfw_people": sklearn.datasets.fetch_lfw_people,
"lfw_pairs": sklearn.datasets.fetch_lfw_pairs,
"covtype": sklearn.datasets.fetch_covtype,
"rcv1": sklearn.datasets.fetch_rcv1,
"kddcup99": sklearn.datasets.fetch_kddcup99,
"california_housing": sklearn.datasets.fetch_california_housing,
}
TOYDATA = {
"boston": sklearn.datasets.load_boston,
"iris": sklearn.datasets.load_iris,
"diabetes": sklearn.datasets.load_diabetes,
"digits": sklearn.datasets.load_digits,
"linnerud": sklearn.datasets.load_linnerud,
"wine": sklearn.datasets.load_wine,
"breast_cancer": sklearn.datasets.load_breast_cancer,
}
if dset is None:
print("type,name")
print("\n".join("{},{}".format("real", k) for k in REALDATA))
print("\n".join("{},{}".format("toy", k) for k in TOYDATA))
sys.exit()
if dset not in TOYDATA.keys() | REALDATA.keys():
sys.exit("Unknown dataset {}. See ph help dataset.".format(dset))
if dset in TOYDATA:
data = TOYDATA[dset]()
else:
data = REALDATA[dset]()
try:
df = pd.DataFrame(data.data, columns=data.feature_names)
except AttributeError:
df = pd.DataFrame(data.data)
try:
df["target"] = pd.Series(data.target)
except Exception:
pass
pipeout(df)
@register
def diff(*cols, periods=1, axis=0):
df = pipein()
if not cols:
df = df.diff(periods=periods, axis=axis)
else:
_assert_cols(df, cols, "diff")
columns = list(cols)
df[columns] = df[columns].diff(periods=periods, axis=axis)
pipeout(df)
@register
def dropna(axis=0, how="any", thresh=None):
"""Remove rows (or columns) with N/A values.
Argument: --axis=0
Defaults to axis=0 (rows), use --axis=1 to remove columns.
Argument: --how=any
Defaults to how='any', which removes rows (resp. columns) containing
nan values. Use how='all' to remove rows (resp. columns) containing
only nan values.
Argument: --thresh=5
If --thresh=x is specified, will delete any row (resp. column) with
fewer than x non-na values.
Usage: cat a.csv | ph dropna
cat a.csv | ph dropna --axis=1 # for column-wise
cat a.csv | ph dropna --thresh=5 # keep rows with >= 5 non-na
cat a.csv | ph dropna --how=all # delete only if all vals na
"""
try:
axis = int(axis)
if axis not in (0, 1):
sys.exit("ph dropna --axis=0 or --axis=1, not {}".format(axis))
except ValueError:
sys.exit("ph dropna --axis=0 or --axis=1, not {}".format(axis))
if thresh is not None:
try:
thresh = int(thresh)
except ValueError:
sys.exit("ph dropna --thresh must be an integer, not {}".format(thresh))
df = pipein()
try:
df = df.dropna(axis=axis, how=how, thresh=thresh)
except Exception as err:
sys.exit(str(err))
pipeout(df)
def _safe_out(output):
"""Prints output to standard out, catching broken pipe."""
try:
print(output)
except BrokenPipeError:
try:
sys.stdout.close()
except IOError:
pass
try:
sys.stderr.close()
except IOError:
pass
def pipeout(df, sep=",", index=False, *args, **kwargs):
csv = df.to_csv(sep=sep, index=index, *args, **kwargs)
output = csv.rstrip("\n")
_safe_out(output)
def pipein(ftype="csv", **kwargs):
skiprows = kwargs.get("skiprows")
if skiprows is not None:
try:
skiprows = int(skiprows)
if skiprows < 0:
raise ValueError("Negative")
except ValueError:
sys.exit("skiprows must be a non-negative int, not {}".format(skiprows))
kwargs["skiprows"] = skiprows
if kwargs.get("sep") == "\\t":
kwargs["sep"] = "\t"
try:
return READERS[ftype](sys.stdin, **kwargs)
except pd.errors.EmptyDataError:
return pd.DataFrame()
except pd.errors.ParserError as err:
sys.exit(str(err))
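# Hedged sketch of the command pattern used throughout this module: read a
# DataFrame from stdin, transform it, stream csv back to stdout. The command
# name and column 'x' are hypothetical.
#
#     @register
#     def double_x():
#         df = pipein()
#         df["x"] = df["x"] * 2
#         pipeout(df)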
@register
def fillna(value=None, method=None, limit=None):
"""Fill na values with a certain value or method, at most `limit` many.
Takes either a value, or a method using (e.g.) --method=ffill.
Argument: value
If provided, replaces all N/A values with prescribed value.
Argument: --method=pad
If provided, value cannot be provided. Allowed methods are
backfill, bfill, pad, ffill
Argument: --limit=x
If provided, limits number of consecutive N/A values to fill.
Usage: cat a.csv | ph fillna 999.75
cat a.csv | ph fillna -1
cat a.csv | ph fillna --method=pad
cat a.csv | ph fillna --method=pad --limit=5
"""
if limit is not None:
try:
limit = int(limit)
except ValueError:
sys.exit("--limit=x must be an integer, not {}".format(limit))
METHODS = ("backfill", "bfill", "pad", "ffill")
if method is not None:
if method not in METHODS:
sys.exit("method must be one of {}, not {}".format(METHODS, method))
pipeout(pipein().fillna(method=method, limit=limit))
elif value is not None:
value = __tryparse(value)
pipeout(pipein().fillna(value=value, limit=limit))
else:
sys.exit("'ph fillna' needs exactly one of value and method")
@register
def query(expr):
"""Using pandas queries.
Usage: cat a.csv | ph query "x > 5"
"""
df = pipein()
new_df = df.query(expr)
pipeout(new_df)
@register
def grep(*expr, case=True, na=float("nan"), regex=True, column=None):
"""Grep (with regex) for content in csv file.
Usage: cat a.csv | ph grep 0
cat a.csv | ph grep search_string
cat a.csv | ph grep "A|B" # search hits a or b
cat a.csv | ph grep "a|b" --case=False # case insensitive
cat a.csv | ph grep 4 --column=x
To disable regex (e.g. simple search for "." or "*" characters, use
--regex=False).
Search only in a specific column with --column=col.
Supports regex search queries such as "0-9A-F" and "\\d" (possibly
double-escaped.)
"""
df = pipein()
if case is True or case in TRUTHY:
case = True
elif case in FALSY:
case = False
else:
sys.exit("ph grep: Unknown --case={} should be True or False".format(case))
if regex is True or regex in TRUTHY:
regex = True
elif regex in FALSY:
regex = False
else:
sys.exit("ph grep: Unknown --regex={} should be True or False".format(regex))
if column is not None:
_assert_col(df, column, "grep")
expr = " ".join(str(e) for e in expr) # force string input
try:
import numpy
except ImportError:
sys.exit("numpy needed for grep. pip install numpy")
retval = df[
numpy.logical_or.reduce(
[
df[col].astype(str).str.contains(expr, case=case, na=na, regex=regex)
for col in (df.columns if column is None else [column])
]
)
]
pipeout(retval)
@register
def appendstr(col, s, newcol=None):
"""Special method to append a string to the end of a column.
Usage: cat e.csv | ph appendstr year -01-01 | ph date year
"""
df = pipein()
if newcol is None:
newcol = col
df[newcol] = df[col].astype(str) + s
pipeout(df)
@register
def split(col, pat=" "):
df = pipein()
_assert_col(df, col, "split")
df[[col, col + "_rhs"]] = df[col].str.split(pat=pat, n=1, expand=True)
pipeout(df)
@register
def strip(*cols, lstrip=False, rstrip=False):
"""Strip (trim) a string.
Usage: cat x.csv | ph strip
cat x.csv | ph strip --lstrip=True
cat x.csv | ph strip --rstrip=True
"""
df = pipein()
if not cols:
cols = list(df.columns)
else:
cols = list(cols)
_assert_cols(df, cols, "strip")
for c in cols:
if lstrip in TRUTHY:
df[c] = df[c].str.lstrip()
elif rstrip in TRUTHY:
df[c] = df[c].str.rstrip()
else:
df[c] = df[c].str.strip()
pipeout(df)
@register
def removeprefix(col, prefix=" "):
"""Remove prefix of contents of a column.
Usage: cat a.csv | ph removeprefix col1 ..
See also @removesuffix @strip
"""
prefix = str(prefix)
plen = len(prefix)
df = pipein()
_assert_col(df, col, "removeprefix")
df[col] = df[col].apply(
lambda s: str(s)[plen:] if str(s).startswith(prefix) else str(s)
)
pipeout(df)
@register
def removesuffix(col, suffix=" "):
"""Remove suffix of contents of a column.
Usage: cat a.csv | ph removesuffix col1 ..
See also @removeprefix @strip
"""
suffix = str(suffix)
plen = len(suffix)
df = pipein()
_assert_col(df, col, "removesuffix")
df[col] = df[col].apply(
lambda s: str(s)[:-plen] if str(s).endswith(suffix) else str(s)
)
pipeout(df)
@register
def astype(type, column=None, newcolumn=None):
"""Cast a column to a different type.
Usage: cat a.csv | ph astype double x [new_x]
"""
df = pipein()
try:
if column is None:
df = df.astype(type)
elif newcolumn is not None:
df[newcolumn] = df[column].astype(type)
else:
df[column] = df[column].astype(type)
except ValueError as err:
sys.exit("Could not convert to {}: {}".format(type, err))
pipeout(df)
@register
def dtypes(t=None):
"""If no argument is provided, output types, otherwise filter on types.
If no argument is provided, output a csv with two columns, "column" and
"dtype". The "column" column contains the names of the columns in the input
csv and the "dtype" column contains their respective types.
If an argument is provided, all columns with the prescribed type is output.
Usage: cat a.csv | ph dtypes
cat a.csv | ph dtypes float64
"""
if t is None:
df = pipein()
newdf = pd.DataFrame(pd.Series(df.columns), columns=["column"])
newdf["dtype"] = pd.Series([str(e) for e in df.dtypes])
pipeout(newdf.T, header=False)
else:
df = pipein().select_dtypes(t)
pipeout(df)
@register
def pivot(columns, index=None, values=None):
"""Reshape csv organized by given index / column values.
Suppose b.csv is
foo,bar,baz,zoo
one,A,1,x
one,B,2,y
one,C,3,z
two,A,4,q
two,B,5,w
two,C,6,t
Usage: cat b.csv | ph pivot bar --index=foo --values=baz
A B C
-- --- --- ---
0 1 2 3
1 4 5 6
"""
pipeout(pipein().pivot(index=index, columns=columns, values=values))
@register
def crosstab(column):
"""Perform a very simplistic crosstabulation on one column of the input csv.
Usage: cat b.csv | ph crosstab foo
"""
newcol = "crosstab_{}".format(column)
df = pd.crosstab(pipein()[column], newcol)
df["id"] = list(df[newcol].index)
pipeout(df)
@register
def groupby(*columns, how="sum", as_index=False):
"""Group by columns, then apply `how` function.
Usage: cat a.csv | ph groupby animal # default to sum
cat a.csv | ph groupby animal --how=mean
cat a.csv | ph groupby animal --how=prod
cat a.csv | ph groupby animal --as_index=True # removes index
"""
columns = list(columns)
if not columns:
sys.exit("Needs at least one column to group by")
df = pipein()
_assert_cols(df, columns, "groupby")
if as_index in TRUTHY:
as_index = True
elif as_index in FALSY:
as_index = False
else:
sys.exit("--as_index=True or False, not {}".format(as_index))
grouped = df.groupby(columns, as_index=as_index)
try:
fn = getattr(grouped, how)
except AttributeError:
sys.exit("Unknown --how={}, should be sum, mean, ...".format(how))
retval = fn()
pipeout(retval)
@register
def rolling(window, *columns, how="sum", win_type=None, std=None, beta=None, tau=None):
"""Rolling window calculations using provided `how` function.
Usage: cat a.csv | ph rolling 3
cat a.csv | ph rolling 5 --how=mean
cat a.csv | ph rolling 5 colA colB --how=mean
cat a.csv | ph rolling 5 --win_type=gaussian --std=7.62
"""
df = pipein()
orig_columns = list(df.columns)
columns = list(columns)
_assert_cols(df, columns, "rolling")
if not columns:
columns = list(df.columns)
noncols = [c for c in df.columns if c not in columns]
rollin = df[columns].rolling(window, win_type=win_type)
nonrollin = df[noncols]
try:
fn = getattr(rollin, how)
except AttributeError:
sys.exit("Unknown --how={}, should be sum, mean, ...".format(how))
if {std, beta, tau} != {None}:
retval = fn(std=std, beta=beta, tau=tau)
else:
retval = fn()
df = pd.concat([retval, nonrollin], axis=1)
for col in orig_columns:
if col not in df.columns:
op = "ph rolling"
sys.exit(
'{}: Could not perform rolling window on column "{}"'.format(op, col)
)
df = df[orig_columns]
pipeout(df)
@register
def ewm(
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
com=None,
span=None,
halflife=None,
alpha=None,
how="mean",
):
"""Provide exponential weighted functions.
A related set of functions are exponentially weighted versions of
several of the above statistics. A similar interface to rolling and
expanding is accessed through the ewm method to receive an EWM
object. A number of expanding EW (exponentially weighted) methods
are provided:
* mean
* var
* std
* corr
* cov
Usage: cat a.csv | ph ewm --com=0.5 --how=mean
cat a.csv | ph ewm --halflife=0.5 --how=std
"""
if {com, span, halflife, alpha} == {None}:
sys.exit("Must pass one of com, span, halflife, or alpha")
df = pipein()
ewm_ = df.ewm(
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
)
try:
fn = getattr(ewm_, how)
except AttributeError:
sys.exit("Unknown --how={}, should be mean, var, std, corr, cov..".format(how))
retval = fn()
pipeout(retval)
@register
def expanding(min_periods=1, axis=0, how="sum", quantile=None):
"""Provide expanding transformations.
A common alternative to rolling statistics is to use an expanding
window, which yields the value of the statistic with all the data
available up to that point in time.
For working with data, a number of window functions are provided for
computing common window or rolling statistics. Among these are
count, sum, mean, median, correlation, variance, covariance,
standard deviation, skewness, and kurtosis.
Usage: cat a.csv | ph expanding
cat a.csv | ph expanding 1 --how=sum # above equivalent to this
cat a.csv | ph expanding 2
cat a.csv | ph expanding 5 --how=quantile --quantile=0.25
"""
df = pipein()
if quantile is not None:
if how != "quantile":
sys.exit("Use both or none of --how=quantile and --quantile=<float>")
if how == "quantile" and quantile is None:
sys.exit("--how=quantile needs --quantile=<float>, e.g. --quantile=0.25")
expanding_ = df.expanding(min_periods=min_periods, axis=axis)
try:
fn = getattr(expanding_, how)
except AttributeError:
sys.exit("Unknown --how={}, should be sum, mean, max, quantile..".format(how))
if how == "quantile":
retval = fn(quantile)
else:
retval = fn()
pipeout(retval)
@register
def monotonic(column, direction="+"):
"""Check if a certain column is monotonically increasing or decreasing.
Usage: cat a.csv | ph monotonic x
cat a.csv | ph monotonic x + # equivalent to above
cat a.csv | ph monotonic x - # for decreasing
"""
df = pipein()
if column not in df:
sys.exit("Unknown column {}".format(column))
if direction not in "+-":
sys.exit("direction must be either + or -")
print("{}_monotonic".format(column))
if direction == "+":
print(df[column].is_monotonic)
else:
print(df[column].is_monotonic_decreasing)
@register
def iplot(*args, **kwargs):
"""Use plotly/cufflinks for interactive plot.
This option is similar to `plot` but creates an HTML file and opens a
browser for an interactive plot.
Usage: cat a.csv | ph iplot
cat a.csv | ph iplot --kind=bar
cat a.csv | ph iplot --kind=bar --barmode=stack
Depends on cufflinks: pip install ph[iplot].
"""
try:
import cufflinks # noqa
import plotly as py
except ImportError:
sys.exit("iplot needs cufflinks, pip install ph[iplot]")
df = pipein()
fig = df.iplot(*args, asFigure=True, **kwargs)
py.offline.plot(fig)
pipeout(df)
@register
def plot(*args, **kwargs):
"""Plot the csv file.
Usage: ph plot
ph plot --index=col
ph plot --kind=bar
ph plot --kind=scatter --x=col1 --y=col2
ph plot --style=k--
ph plot --logx=True
ph plot --logy=True
ph plot --loglog=True
ph plot --savefig=fname.png
ph plot --savefig=fname.svg
ph plot --savefig=fname.svg --savefig-dpi=300
"""
try:
import matplotlib.pyplot as plt
except ImportError:
sys.exit("plot depends on matplotlib, install ph[plot]")
df = pipein()
index = kwargs.get("index")
if index is not None:
_assert_col(df, index, caller="plot")
df = df.set_index(index)
del kwargs["index"]
for log_ in ("logx", "logy", "loglog"):
if kwargs.get(log_) in TRUTHY:
kwargs[log_] = True
fname = kwargs.get("savefig")
dpi = kwargs.get("savefig-dpi")
if fname:
del kwargs["savefig"]
if dpi:
del kwargs["savefig-dpi"]
fig, ax = plt.subplots()
df.plot(**kwargs, ax=ax)
if index == "date":
fig.autofmt_xdate()
if fname:
plt.tight_layout()
plt.savefig(fname, dpi=dpi)
else:
plt.show()
pipeout(df)
@register
def eval(expr):
"""Eval expr using pandas.DataFrame.eval.
Example: cat a.csv | ph eval "z = x + y"
"""
df = pipein()
pipeout(df.eval(expr))
@register
def normalize(col=None):
"""Normalize a column or an entire dataframe.
Usage: cat a.csv | ph normalize
cat a.csv | ph normalize x
Warning: This is probably not what you want.
"""
df = pipein()
if col is None:
df = (df - df.min()) / (df.max() - df.min())
else:
df[col] = (df[col] - df[col].min()) / (df[col].max() - df[col].min())
pipeout(df)
@register
def date(col=None, unit=None, origin="unix", errors="raise", dayfirst=False, **kwargs):
"""Assemble datetime from multiple columns or from one column
--unit can be D, s, us, ns (defaults to ns, ns from origin)
--origin can be unix, julian, or time offset, e.g. '2000-01-01'
--errors can be raise, coerce, ignore (see pandas.to_datetime)
--format a strptime format string, e.g. '%Y-%m-%d %H:%M:%S'
--utc=True if the input is in utc, i.e. seconds from epoch
Usage: cat a.csv | ph date x
cat a.csv | ph date x --unit=s --origin="1984-05-17 09:30"
cat a.csv | ph date x --dayfirst=True
cat a.csv | ph date # if a.csv contains year, month, date
cat a.csv | ph date x --format="%Y-%m-%d"
cat a.csv | ph date x --utc=True
"""
DATE_ERRORS = ("ignore", "raise", "coerce")
if errors not in DATE_ERRORS:
sys.exit("Errors must be one of {}, not {}.".format(DATE_ERRORS, errors))
dayfirst = dayfirst in TRUTHY
date_parser = None
if "format" in kwargs:
date_parser = lambda d: [
datetime.datetime.strptime(str(e), kwargs["format"]) for e in d
]
if kwargs.get("utc") in TRUTHY:
date_parser = lambda d: [datetime.datetime.utcfromtimestamp(e) for e in d]
df = pipein()
try:
if col is None:
df = pd.to_datetime(df, unit=unit, origin=origin, errors=errors)
else:
_assert_col(df, col, "date")
if date_parser is None:
df[col] = pd.to_datetime(
df[col], unit=unit, origin=origin, errors=errors, dayfirst=dayfirst
)
else:
df[col] = date_parser(df[col])
except Exception as err:
sys.exit(err)
pipeout(df)
@register
def describe():
"""Run DataFrame's describe method.
The result is NOT tabular data, so pipeline ends.
Usage: cat a.csv | ph describe
"""
df = pipein()
try:
out = df.describe()
except ValueError as err:
sys.exit(str(err))
_safe_out(out)
@register
def info():
"""Run DataFrame's info method.
The result is NOT tabular data, so pipeline ends.
Usage: cat a.csv | ph info
"""
print(pipein().info())
@register
def to(ftype, fname=None, sep=None, index=False):
"""Export csv to given format (possibly csv).
Supports csv, html, json, parquet, bigquery, tsv, etc. (see README for full
list).
Usage: cat a.csv | ph to html
cat a.csv | ph to tsv
cat a.csv | ph to csv --index=True
cat a.csv | ph to csv --sep=';'
cat a.csv | ph to clipboard
cat a.csv | ph to json
cat a.csv | ph to parquet out.parquet
"""
if ftype not in WRITERS:
sys.exit("Unknown datatype {}.".format(ftype))
if not fname:
if ftype in ("parquet", "xls", "xlsx", "ods", "pickle"):
sys.exit("{} needs a path".format(ftype))
if ftype == "hdf5":
sys.exit("hdf5 writer not implemented")
if index not in TRUTHY + FALSY:
sys.exit("Index must be True or False, not {}".format(index))
index = index in TRUTHY
if ftype == "fwf":
# pandas has not yet implemented to_fwf
df = pipein()
content = tabulate_(df.values.tolist(), list(df.columns), tablefmt="plain")
if fname:
with open(fname, "w") as wout:
wout.write(content)
else:
print(content)
sys.exit()
if sep is not None:
if ftype != "csv":
sys.exit("Only csv mode supports separator")
writer = WRITERS[ftype]
df = pipein()
fn = getattr(df, writer)
kwargs = {}
if ftype == "tsv":
kwargs["sep"] = "\t"
elif ftype == "csv" and sep is not None:
kwargs["sep"] = sep
if ftype == "json":
index = True
if fname is not None:
print(fn(fname, index=index, **kwargs))
else:
print(fn(index=index, **kwargs))
@registerx("from")
def from_(ftype="csv", **kwargs):
"""Read a certain (default csv) format from standard in and stream out as csv.
Usage: cat a.json | ph from json
cat /etc/passwd | ph from csv --sep=':' --header=None
The following pipes should be equivalent:
cat a.csv
cat a.csv | ph to json | ph from json
cat a.tsv | ph from tsv
cat a.tsv | ph from csv --sep='\t'
cat a.tsv | ph from csv --sep='\t' --thousands=','
In the event that the csv data starts on the first line (i.e. no
header is present), use --header=None.
"""
if "header" in kwargs:
kwargs["header"] = __tryparse(kwargs["header"])
skiprows = kwargs.get("skiprows")
if skiprows is not None:
try:
skiprows = int(skiprows)
if skiprows < 0:
raise ValueError("Negative")
except ValueError:
sys.exit("skiprows must be a non-negative int, not {}".format(skiprows))
kwargs["skiprows"] = skiprows
if kwargs.get("sep") == "\\t":
kwargs["sep"] = "\t"
if ftype == "clipboard":
pipeout(READERS["clipboard"](**kwargs))
return
pipeout(pipein(ftype, **kwargs))
@register
def cat(*fnames, axis="index"):
"""Concatenates all files provided.
Usage: ph cat a.csv b.csv c.csv
ph cat a.csv b.csv c.csv --axis=index # default
ph cat a.csv b.csv c.csv --axis=columns
If no arguments are provided, read from std in.
"""
if axis not in ("index", "columns"):
sys.exit("Unknown axis command '{}'".format(axis))
if not fnames:
pipeout(pipein())
else:
dfs = []
for fname in fnames:
df = pd.read_csv(fname)
dfs.append(df)
retval = pd.concat(dfs, axis=axis)
pipeout(retval)
@register
def merge(fname1, fname2, how="inner", on=None):
"""
Merging two csv files.
Usage: ph merge a.csv b.csv --on=ijk
"""
hows = ("left", "right", "outer", "inner")
if how not in hows:
sys.exit("Unknown merge --how={}, must be one of {}".format(how, hows))
df1 = pd.read_csv(fname1)
df2 = pd.read_csv(fname2)
if on is None:
pipeout(pd.merge(df1, df2, how=how))
else:
pipeout(pd.merge(df1, df2, how=how, on=on))
@register
def tab():
"""Equivalent to `ph to tsv`.
Usage: cat a.csv | ph tab
"""
pipeout(pipein(), sep="\t")
@register
def tabulate(*args, **kwargs):
"""Tabulate the output for pretty-printing.
Usage: cat a.csv | ph tabulate --headers --noindex --format=grid
Takes arguments
* --headers
* --noindex
* --format=[grid, latex, pretty, ...].
For a full list of format styles confer the README.
This function uses the tabulate project available as a standalone
package from PyPI.
Using `tabulate` in a pipeline usually means that the `ph` pipeline ends.
This is because of `tabulate`'s focus on user readability over machine
readability.
"""
headers = tuple()
fmt = kwargs.get("format")
index = True
if "--noindex" in args:
index = False
if "--headers" in args:
headers = "keys"
df = pipein()
out = tabulate_(df, tablefmt=fmt, headers=headers, showindex=index)
_safe_out(out)
@register
def show(noindex=False):
"""Similar to ph tabulate --headers [--noindex].
Usage: cat a.csv | ph show
cat a.csv | ph show --noindex
"""
if noindex:
tabulate("--headers", "--noindex")
else:
tabulate("--headers")
def _print_commands(cmds):
num_cols = 72 // max(len(cmd) for cmd in cmds)
while (len(cmds) % num_cols) != 0:
cmds.append("")
df = pd.DataFrame(pd.Series(cmds).values.reshape(num_cols, -1))
print(tabulate_(df.transpose(), showindex=False))
@registerx("help")
def help_(*args, **kwargs):
"""Writes help (docstring) about the different commands."""
if not args:
print("Usage: ph command arguments")
print(USAGE_TEXT)
_print_commands(sorted(COMMANDS.keys()))
sys.exit(0)
cmd = args[0]
ds = None
if cmd in DOCS:
ds = DOCS[cmd]
else:
try:
fn = getattr(pd.DataFrame, cmd)
ds = getattr(fn, "__doc__")
except AttributeError:
pass
if ds is None:
sys.exit("Unknown command {}".format(cmd))
print("Usage: ph {}".format(cmd))
print(" {}".format(ds.strip()))
def slugify_name(name):
name_ = name
try:
name = float(name_)
except ValueError:
pass
if isinstance(name_, (int, str)):
try:
name = int(name_)
except ValueError:
pass
if isinstance(name, (int, float)):
name = str(name) + "_"
if not name:
return "unnamed"
if name == "_":
return "_"
lead_under = name[0] == "_"
trail_under = name[-1] == "_"
name = name.strip().lower()
unwanted = set(c for c in name if not c.isalnum())
for u in unwanted:
name = name.replace(u, "_").strip()
while "__" in name:
name = name.replace("__", "_").strip()
name = name.strip("_")
if lead_under:
name = "_" + name
if trail_under:
name = name + "_"
return name
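# Illustrative input -> output pairs for slugify_name (a sketch, not a test
# suite from the original code):
#     slugify_name("Total Sales (USD)")  -> "total_sales_usd"
#     slugify_name("_private field ")    -> "_private_field"
#     slugify_name(2020)                 -> "2020_"
#     slugify_name("")                   -> "unnamed"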
@register
def slugify():
"""Slugify the column headers.
Usage: cat a.csv | ph slugify
Removes all non-alphanumeric characters aside from the underscore.
Is useful in scenarios where you have possibly many columns with
very ugly names. Can be a good preprocessor to @rename:
Usage: cat a.csv | ph slugify | ph rename less_bad_name good_name
"""
df = pipein()
df.columns = [slugify_name(name) for name in df.columns]
pipeout(df)
@register
def raw(fname=None):
"""Do your best to read this comma-separated input."""
import csv
if fname is None:
d = csv.reader(sys.stdin)
df = pd.DataFrame(d)
import logging
pvl_logger = logging.getLogger('pvlib')
import datetime
import numpy as np
import numpy.testing as npt
import pandas as pd
from nose.tools import raises, assert_almost_equals
from nose.plugins.skip import SkipTest
from pandas.util.testing import assert_frame_equal
from pvlib.location import Location
from pvlib import solarposition
# setup times and locations to be tested.
times = pd.date_range(start=datetime.datetime(2014,6,24),
end=datetime.datetime(2014,6,26), freq='15Min')
tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
golden_mst = Location(39.742476, -105.1786, 'MST', 1830.14) # no DST issues possible
golden = Location(39.742476, -105.1786, 'America/Denver', 1830.14) # DST issues possible
times_localized = times.tz_localize(tus.tz)
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
def test_spa_physical():
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30), periods=1, freq='D')
try:
ephem_data = solarposition.spa_c(times, golden_mst, pressure=82000,
temperature=11).ix[0]
except ImportError:
raise SkipTest
assert_almost_equals(39.872046, ephem_data['elevation'], 6)
assert_almost_equals(50.111622, ephem_data['apparent_zenith'], 6)
assert_almost_equals(194.340241, ephem_data['azimuth'], 6)
assert_almost_equals(39.888378, ephem_data['apparent_elevation'], 6)
def test_spa_physical_dst():
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30), periods=1,
freq='D')
try:
ephem_data = solarposition.spa_c(times, golden, pressure=82000,
temperature=11).ix[0]
except ImportError:
raise SkipTest
assert_almost_equals(39.872046, ephem_data['elevation'], 6)
assert_almost_equals(50.111622, ephem_data['apparent_zenith'], 6)
assert_almost_equals(194.340241, ephem_data['azimuth'], 6)
assert_almost_equals(39.888378, ephem_data['apparent_elevation'], 6)
def test_spa_localization():
try:
assert_frame_equal(solarposition.spa_c(times, tus),
solarposition.spa_c(times_localized, tus))
except ImportError:
raise SkipTest
def test_spa_python_numpy_physical():
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30), periods=1, freq='D')
ephem_data = solarposition.spa_python(times, golden_mst, pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy').ix[0]
assert_almost_equals(39.872046, ephem_data['elevation'], 6)
assert_almost_equals(50.111622, ephem_data['apparent_zenith'], 6)
assert_almost_equals(194.340241, ephem_data['azimuth'], 6)
assert_almost_equals(39.888378, ephem_data['apparent_elevation'], 6)
def test_spa_python_numpy_physical_dst():
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30), periods=1, freq='D')
ephem_data = solarposition.spa_python(times, golden, pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy').ix[0]
assert_almost_equals(50.111622, ephem_data['apparent_zenith'], 6)
assert_almost_equals(194.340241, ephem_data['azimuth'], 6)
assert_almost_equals(39.888378, ephem_data['apparent_elevation'], 6)
def test_spa_python_numba_physical():
try:
import numba
except ImportError:
raise SkipTest
vers = numba.__version__.split('.')
if int(vers[0] + vers[1]) < 17:
raise SkipTest
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30), periods=1, freq='D')
ephem_data = solarposition.spa_python(times, golden_mst, pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1).ix[0]
assert_almost_equals(39.872046, ephem_data['elevation'], 6)
assert_almost_equals(50.111622, ephem_data['apparent_zenith'], 6)
assert_almost_equals(194.340241, ephem_data['azimuth'], 6)
assert_almost_equals(39.888378, ephem_data['apparent_elevation'], 6)
def test_spa_python_numba_physical_dst():
try:
import numba
except ImportError:
raise SkipTest
vers = numba.__version__.split('.')
if int(vers[0] + vers[1]) < 17:
raise SkipTest
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30), periods=1, freq='D')
ephem_data = solarposition.spa_python(times, golden, pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1).ix[0]
assert_almost_equals(50.111622, ephem_data['apparent_zenith'], 6)
assert_almost_equals(194.340241, ephem_data['azimuth'], 6)
assert_almost_equals(39.888378, ephem_data['apparent_elevation'], 6)
def test_spa_python_localization():
assert_frame_equal(solarposition.spa_python(times, tus),
solarposition.spa_python(times_localized, tus))
def test_get_sun_rise_set_transit():
south = Location(-35.0, 0.0, tz='UTC')
times = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 0),
datetime.datetime(2004, 12, 4, 0)]
).tz_localize('UTC')
sunrise = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 7, 8, 15, 471676),
datetime.datetime(2004, 12, 4, 4, 38, 57, 27416)]
).tz_localize('UTC').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 17, 1, 4, 479889),
datetime.datetime(2004, 12, 4, 19, 2, 2, 499704)]
).tz_localize('UTC').tolist()
result = solarposition.get_sun_rise_set_transit(times, south,
delta_t=64.0)
frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
del result['transit']
assert_frame_equal(frame, result)
# tests from USNO
# Golden
golden = Location(39.0, -105.0, tz='MST')
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2),]
).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 7, 19, 2, 225169),
datetime.datetime(2015, 8, 2, 5, 1, 26, 963145)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 16, 49, 10, 13145),
datetime.datetime(2015, 8, 2, 19, 11, 31, 816401)
]).tz_localize('MST').tolist()
result = solarposition.get_sun_rise_set_transit(times, golden, delta_t=64.0)
frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
del result['transit']
assert_frame_equal(frame, result)
### Load Necessary Libraries
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests
### Loading page content
page=requests.get('https://www.speedtest.net/global-index#mobile')
cont=page.content
print(page.status_code)
soupobj=bs(cont,'html.parser')
#print(soupobj.prettify()) #printing out soup object
### Data sorting
#adding all country names to the empty list empli for the data frame built later
countr=soupobj.find_all(class_='country')
empli=[]
for li in countr:
empli.append(li.get_text())
#print(empli)
#adding all internet speed values to the empty list empli2 for the data frame built later
empli2=[]
speed=soupobj.find_all(class_='speed')
for s in speed:
empli2.append(s.get_text())
#print(empli2)
### Cleaning newline characters
#removing the '\n' characters from every item of empli
list3 = [x.replace('\n', '') for x in empli]
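# Illustrative example of the cleanup above (the country name is hypothetical):
#     '\nSouth Korea\n'.replace('\n', '')  ->  'South Korea'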
#print(list3)
#making datfm-DataFrame
datfm=pd.DataFrame({'Countries':list3,
'Speed in Mbps':empli2})
#Converting dataframe into csv
datfm.to_csv('Sub.csv',index=False)
### Data cleaning
newcsv=pd.read_csv('Sub.csv')
newcsv
Mobiletest=newcsv[1:139] #csv of mobile speedtests across the globe
Broadbandtest=newcsv[140:] #csv of broadband speedtests across the globe
Mobiletest
Broadbandtest
Mobiletest.to_csv('Mobiletest.csv',index=False)
Broadbandtest.to_csv('Broadbandtest.csv',index=False)
c1=pd.read_csv('Mobiletest.csv')
c2 = pd.read_csv('Broadbandtest.csv')
from tensorflow.python.keras import Sequential
from pandas_datareader import data
import pandas as pd
from Common.StockMarketIndex.AbstractStockMarketIndex import AbstractStockMarketIndex
from Common.StockMarketIndex.Yahoo.SnP500Index import SnP500Index
from Common.StockMarketIndex.Yahoo.VixIndex import VixIndex
from Common.StockOptions.Yahoo.YahooStockOption import YahooStockOption
stock = 'TSLA'
yahooStockOption: YahooStockOption = YahooStockOption(stock)
total_size: int = len(yahooStockOption.DataFrame)
train_size: int = round(0.85 * total_size)
test_size: int = round(0.1 * total_size)
print(yahooStockOption.DataFrame.describe(include='all'))
print(total_size)
print(train_size)
print(test_size)
sAnP500: AbstractStockMarketIndex = SnP500Index('yahoo', "^GSPC", yahooStockOption.TimeSpan)
vixIndex: AbstractStockMarketIndex = VixIndex('yahoo', "^VIX", yahooStockOption.TimeSpan)
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
scaler = MinMaxScaler(feature_range=(0, 1))
prediction_days: int = 60
future_days: int = 1
scaled_data: np.ndarray = scaler.fit_transform(yahooStockOption.DataFrame['Adj Close'].values.reshape(-1, 1))
x_train_list: list = []
y_train_list: list = []
print(type(x_train_list))
for x in range(prediction_days, total_size):
x_train_list.append(scaled_data[x - prediction_days: x, 0])
y_train_list.append(scaled_data[x, 0])
x_train, y_train = np.array(x_train_list), np.array(y_train_list)
print(type(x_train))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
print(type(x_train))
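# Shape check (a sketch; the exact numbers depend on the downloaded data):
# with prediction_days == 60 and N rows of scaled prices, the loop above
# yields N - 60 overlapping windows, so after the reshape
#     x_train.shape == (N - 60, 60, 1)   # (samples, timesteps, features)
#     y_train.shape == (N - 60,)
# which is the 3-D layout the LSTM layers below expect.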
model: Sequential = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=future_days))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=25, batch_size=32)
# data unknown to model
test_start = dt.datetime(2021, 1, 1)
test_end = dt.datetime.now()
test_data = data.DataReader(stock, 'yahoo', test_start, test_end)
actual_prices = test_data['Adj Close'].values
total_dataset = pd.concat((yahooStockOption.DataFrame['Adj Close'], test_data['Adj Close']), axis=0)
# Send same show commands to all devices
# Read devices from an Excel file
import pandas as pd
from netmiko import ConnectHandler
# Read Excel file
excel_file = pd.read_excel(
io="Voice-Gateways-Info.xlsx", sheet_name=0, engine="openpyxl"
)
# Converts Excel file to data frame
df = pd.DataFrame(excel_file)
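# Hedged sketch (not part of the original script) of how the rows could be fed
# to netmiko. The column names ('device_type', 'host', 'username', 'password')
# are assumptions about the spreadsheet layout.
#
#     for _, row in df.iterrows():
#         conn = ConnectHandler(
#             device_type=row["device_type"],
#             host=row["host"],
#             username=row["username"],
#             password=row["password"],
#         )
#         print(conn.send_command("show version"))
#         conn.disconnect()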
from Tools import *
from Agent import *
import time
import csv
import graphicDisplayGlobalVarAndFunctions as gvf
import commonVar as common
import pandas as pd
import parameters as par
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# to eliminate an annoying warning at time 1 in the time series plot
warnings.filterwarnings("ignore", module="matplotlib")
def do1b(address):
if common.cycle == 1:
# setting Figure for the net
if not common.IPython or common.graphicStatus == "PythonViaTerminal":
# the or is about ipython running in a terminal
f=gvf.plt.figure(num=2)
mngr1 = gvf.plt.get_current_fig_manager() # NB, after figure()
mngr1.window.wm_geometry("+650+0")
mngr1.set_window_title("Links Entrepreneurs - Workers")
# having the map of the agent
agL = []
for ag in address.modelSwarm.agentList:
agL.append(ag.number)
agL.sort()
# print "\noActions before drawGraph agents", agL
# print "oActions before drawGraph nodes", common.g.nodes()
# basic action to visualize the networkX output
gvf.openClearNetworkXdisplay()
gvf.drawGraph()
def do2a(address, cycle):
self = address # if necessary
# ask each agent, without parameters
print("Time = ", cycle, "ask all agents to report position")
askEachAgentInCollection(
address.modelSwarm.getAgentList(),
Agent.reportPosition)
def do2b(address, cycle):
self = address # if necessary
# ask a single agent, without parameters
print("Time = ", cycle, "ask first agent to report position")
if address.modelSwarm.getAgentList() != []:
askAgent(address.modelSwarm.getAgentList()[0],
Agent.reportPosition)
def otherSubSteps(subStep, address):
if subStep == "pause":
input("Hit enter key to continue")
return True
elif subStep == "collectStructuralData":
collectStructuralData(address.modelSwarm.agentList, common.cycle)
return True
elif subStep == "collectTimeSeries":
collectTimeSeries(address.modelSwarm.agentList, common.cycle)
return True
elif subStep == "visualizePlot":
visualizePlot()
return True
elif subStep == "prune":
common.prune = True
newValue = input(("Prune links with weight < %d\n" +
"Enter to confirm " +
"or introduce a new level: ") %
common.pruneThreshold)
if newValue != "":
common.pruneThreshold = int(newValue)
return True
# this subStep performs only partially the "end" item; the execution
# will continue in ObserverSwarm.py
elif subStep == "end":
if not common.IPython or common.graphicStatus == "PythonViaTerminal":
# the or is about ipython running in a terminal
# += and ; as first character because a first part
# of the string toBeExecuted is already defined in
# commonVar.py
common.toBeExecuted += ";gvf.plt.figure(2);gvf.plt.close()"
else:
return False
# collect Structural Data
def collectStructuralData(aL, t):
# creating the dataframe
try:
common.str_df
except BaseException:
common.str_df = pd.DataFrame(columns=['entrepreneurs', 'workers'])
#!/usr/bin/env python3
import unittest
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.testing as pdtest
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from datafold.dynfold.transform import (
TSCApplyLambdas,
TSCFeaturePreprocess,
TSCFiniteDifference,
TSCIdentity,
TSCPolynomialFeatures,
TSCPrincipalComponent,
TSCRadialBasis,
TSCTakensEmbedding,
TSCTransformerMixin,
)
from datafold.pcfold.kernels import *
from datafold.pcfold.timeseries.collection import TSCDataFrame, TSCException
def _all_tsc_transformers():
# only finds the ones that are importated (DMAP e.g. is not here)
print(TSCTransformerMixin.__subclasses__())
class TestTSCTransform(unittest.TestCase):
def _setUp_simple_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
self.simple_df = pd.DataFrame(np.random.rand(9, 2), index=idx, columns=col)
def _setUp_takens_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
# Requires non-random values
self.takens_df_short = pd.DataFrame(
np.arange(18).reshape([9, 2]), index=idx, columns=col
)
n_samples_timeseries = 100
idx = pd.MultiIndex.from_product(
[np.array([0, 1]), np.arange(n_samples_timeseries)]
)
self.takens_df_long = pd.DataFrame(
np.random.rand(n_samples_timeseries * 2, 2), index=idx, columns=col
)
def setUp(self) -> None:
self._setUp_simple_df()
self._setUp_takens_df()
def test_is_valid_sklearn_estimator(self):
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.estimator_checks import check_estimator
TEST_ESTIMATORS = (
TSCIdentity(),
TSCPrincipalComponent(),
TSCFeaturePreprocess(sklearn_transformer=MinMaxScaler()),
TSCFeaturePreprocess(sklearn_transformer=StandardScaler()),
TSCPolynomialFeatures(),
)
for test_estimator in TEST_ESTIMATORS:
for estimator, check in check_estimator(test_estimator, generate_only=True):
try:
check(estimator)
except Exception as e:
print(estimator)
print(check)
raise e
def test_identity0(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity()
pdtest.assert_frame_equal(_id.fit_transform(tsc), tsc)
pdtest.assert_frame_equal(_id.inverse_transform(tsc), tsc)
def test_identity1(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity(include_const=True)
tsc_plus_const = tsc.copy(deep=True)
tsc_plus_const["const"] = 1
pdtest.assert_frame_equal(_id.fit_transform(tsc.copy()), tsc_plus_const)
pdtest.assert_frame_equal(_id.inverse_transform(tsc_plus_const), tsc)
def test_identity2(self):
data = np.random.rand(5, 5)
data_wo_const = TSCIdentity(include_const=False).fit_transform(data)
data_plus_const = TSCIdentity(include_const=True).fit_transform(data)
nptest.assert_equal(data, data_wo_const)
nptest.assert_equal(data_plus_const, np.column_stack([data, np.ones(5)]))
def test_identity3(self):
data = TSCDataFrame(self.simple_df)
data_wo_const = TSCIdentity(
include_const=False, rename_features=True
).fit_transform(data)
data_with_const = TSCIdentity(
include_const=True, rename_features=True
).fit_transform(data)
data = data.add_suffix("_id")
pdtest.assert_index_equal(data.columns, data_wo_const.columns)
data["const"] = 1
pdtest.assert_index_equal(data.columns, data_with_const.columns)
def test_scale_min_max(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("min-max")
scaled_tsc = scale.fit_transform(tsc_df)
# sanity check:
nptest.assert_allclose(scaled_tsc.min().to_numpy(), np.zeros(2), atol=1e-16)
nptest.assert_allclose(scaled_tsc.max().to_numpy(), np.ones(2), atol=1e-16)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_scale_standard(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("standard")
scaled_tsc = scale.fit_transform(tsc_df)
nptest.assert_array_equal(
scaled_tsc.to_numpy(),
StandardScaler(with_mean=True, with_std=True).fit_transform(
tsc_df.to_numpy()
),
)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_sklearn_scaler(self):
tsc_df = TSCDataFrame(self.simple_df)
from sklearn.preprocessing import (
MaxAbsScaler,
PowerTransformer,
QuantileTransformer,
RobustScaler,
)
# each tuple has the class and a dictionary with the init-options
scaler = [
(MaxAbsScaler, dict()),
(PowerTransformer, dict(method="yeo-johnson")),
(PowerTransformer, dict(method="box-cox")),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="uniform"),
),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="normal"),
),
(RobustScaler, dict()),
]
for cls, kwargs in scaler:
scale = TSCFeaturePreprocess(sklearn_transformer=cls(**kwargs))
tsc_transformed = scale.fit_transform(tsc_df)
# Check the underlying array equals:
nptest.assert_array_equal(
cls(**kwargs).fit_transform(tsc_df.to_numpy()),
tsc_transformed.to_numpy(),
)
# check inverse transform is equal the original TSCDataFrame:
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(tsc_transformed))
def test_polynomial_feature_transform01(self):
from sklearn.preprocessing import PolynomialFeatures
tsc = TSCDataFrame(self.simple_df)
for degree in [2, 3, 4]:
for include_bias in [True, False]:
actual = TSCPolynomialFeatures(
degree=degree, include_bias=include_bias, include_first_order=True
).fit_transform(tsc)
expected = PolynomialFeatures(
degree=degree, include_bias=include_bias
).fit_transform(tsc.to_numpy())
nptest.assert_array_equal(actual.to_numpy(), expected)
def test_polynomial_feature_transform02(self):
tsc = TSCDataFrame(self.simple_df)
for include_first_order in [True, False]:
poly = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=include_first_order
).fit(tsc)
actual = poly.transform(tsc)
expected = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_frame_equal(actual, expected)
def test_polynomial_feature_transform03(self):
tsc = TSCDataFrame(self.simple_df)
actual = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["1", "A^2", "A B", "B^2"], name="feature"),
)
actual = TSCPolynomialFeatures(
degree=2, include_bias=False, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["A^2", "A B", "B^2"], name="feature"),
)
def test_apply_lambda_transform01(self):
# use lambda identity function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform02(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc.apply(np.square, axis=0, raw=True)
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform03(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x, np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
identity = tsc
identity.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
squared = tsc.apply(np.square, axis=0, raw=True)
squared.columns = pd.Index(
["A_lambda1", "B_lambda1"], name=TSCDataFrame.tsc_feature_col_name
)
expected = pd.concat([identity, squared], axis=1)
pdtest.assert_frame_equal(actual, expected)
def test_pca_transform(self):
tsc = TSCDataFrame(self.simple_df)
pca = TSCPrincipalComponent(n_components=1).fit(tsc)
data = pca.transform(tsc)
self.assertIsInstance(data, TSCDataFrame)
pca_sklearn = PCA(n_components=1).fit(tsc.to_numpy())
data_sklearn = pca_sklearn.transform(tsc)
nptest.assert_allclose(data, data_sklearn, atol=1e-15)
nptest.assert_array_equal(
pca.inverse_transform(data).to_numpy(),
pca_sklearn.inverse_transform(data_sklearn),
)
def test_takens_embedding0(self):
simple_df = self.takens_df_short.drop("B", axis=1)
tsc_df = TSCDataFrame(simple_df)
takens = TSCTakensEmbedding(
delays=1,
lag=0,
frequency=1,
)
actual = takens.fit_transform(tsc_df)
self.assertIsInstance(actual, TSCDataFrame)
# First test
actual_numerics = actual.to_numpy() # only compare the numeric values
expected = np.array(
[
[2.0, 0.0],
[6.0, 4.0],
[10.0, 8.0],
[14.0, 12.0],
[16.0, 14.0],
]
)
nptest.assert_equal(actual_numerics, expected)
# Second test
actual_inverse = takens.inverse_transform(actual)
pdtest.assert_frame_equal(tsc_df.drop([0, 17], level=1), actual_inverse)
def test_takens_embedding1(self):
# test kappa = 1
tsc_df = TSCDataFrame.from_single_timeseries(
pd.DataFrame(
np.column_stack([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]),
columns=["A", "B"],
dtype=float,
)
)
takens = TSCTakensEmbedding(lag=0, delays=5, frequency=1, kappa=1)
        # embed to a single instance
actual = takens.fit_transform(tsc_df)
self.assertIsInstance(actual, TSCDataFrame)
self.assertTrue(actual.has_degenerate())
self.assertEqual(actual.n_timeseries, 1)
# First test
actual_numerics = actual.to_numpy() # only compare the numeric values
expected = np.array([[5, 4, 3, 2, 1, 0]], dtype=float) * np.exp(
-1.0 * np.array([0, 1, 2, 3, 4, 5])
)
expected = np.repeat(expected, 2, axis=1)
nptest.assert_equal(actual_numerics, expected)
# Second test
actual_inverse = takens.inverse_transform(actual)
expected = tsc_df.final_states(1)
pdtest.assert_frame_equal(actual_inverse, expected)
def test_takens_delay_indices(self):
tsc_short = TSCDataFrame(self.takens_df_short) # better check for errors
tsc_long = TSCDataFrame(self.takens_df_long)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=1, lag=0, frequency=1)
.fit(tsc_short)
.delay_indices_,
np.array([1]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=2, lag=0, frequency=1)
.fit(tsc_long)
.delay_indices_,
np.array([1, 2]),
)
with self.assertRaises(TSCException):
# Data too short
TSCTakensEmbedding(delays=5, lag=0, frequency=1).fit(tsc_short)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=1, lag=1, frequency=1)
.fit(tsc_long)
.delay_indices_,
np.array([2]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=5, lag=1, frequency=1)
.fit(tsc_long)
.delay_indices_,
np.array([2, 3, 4, 5, 6]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=2, lag=2, frequency=2)
.fit(tsc_long)
.delay_indices_,
np.array([3, 5]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=4, lag=2, frequency=2)
.fit(tsc_long)
.delay_indices_,
np.array([3, 5, 7, 9]),
)
with self.assertRaises(ValueError):
TSCTakensEmbedding(delays=1, lag=0, frequency=2).fit(tsc_short)
def test_rbf_1d(self):
func = lambda x: np.exp(x * np.cos(3 * np.pi * x)) - 1
x_vals = np.linspace(0, 1, 100)
y_vals = func(x_vals)
df = pd.DataFrame(y_vals, index=x_vals, columns=["qoi"])
tsc = TSCDataFrame.from_single_timeseries(df)
rbf = TSCRadialBasis(kernel=MultiquadricKernel())
rbf_coeff = rbf.fit_transform(tsc)
rbf_coeff_inverse = rbf.inverse_transform(rbf_coeff)
pdtest.assert_frame_equal(tsc, rbf_coeff_inverse, check_exact=False)
def test_rbf_2d(self):
func = lambda x: np.exp(x * np.cos(3 * np.pi * x)) - 1
x_vals = np.linspace(0, 1, 15)
y_vals = func(x_vals)
df = pd.DataFrame(np.column_stack([x_vals, y_vals]), columns=["qoi1", "qoi2"])
tsc = TSCDataFrame.from_single_timeseries(df)
rbf = TSCRadialBasis(kernel=MultiquadricKernel(epsilon=1.0))
rbf_coeff = rbf.fit_transform(tsc)
rbf_coeff_inverse = rbf.inverse_transform(rbf_coeff)
pdtest.assert_frame_equal(tsc, rbf_coeff_inverse, check_exact=False)
def test_rbf_centers(self):
func = lambda x: np.exp(x * np.cos(3 * np.pi * x)) - 1
x_vals = np.linspace(0, 1, 15)
y_vals = func(x_vals)
df = pd.DataFrame(np.column_stack([x_vals, y_vals]), columns=["qoi1", "qoi2"])
tsc = TSCDataFrame.from_single_timeseries(df)
        # Use centers at a different location than the data. These can be selected by an
        # optimization routine (such as k-means), or placed randomly in the phase space.
x_vals_centers = np.linspace(0, 1, 10)
y_vals_centers = func(x_vals_centers)
centers = np.column_stack([x_vals_centers, y_vals_centers])
centers = pd.DataFrame(centers, columns=tsc.columns)
rbf = TSCRadialBasis(kernel=MultiquadricKernel(epsilon=1.0))
rbf = rbf.fit(centers)
rbf_coeff = rbf.transform(tsc)
rbf_coeff_inverse = rbf.inverse_transform(rbf_coeff)
pdtest.assert_index_equal(tsc.index, rbf_coeff_inverse.index)
| pdtest.assert_index_equal(tsc.columns, rbf_coeff_inverse.columns) | pandas.testing.assert_index_equal |
import pandas as pd
import numpy as np
import pytest
from kgextension.endpoints import DBpedia
from kgextension.schema_matching import (
relational_matching,
label_schema_matching,
value_overlap_matching,
string_similarity_matching
)
class TestRelationalMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = | pd.read_csv(path_input) | pandas.read_csv |
# imports
import io
import math
import os
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
import xgboost as xgb
from sklearn.metrics import accuracy_score
# Simple_markings folder. Holds the "events", e.g. 3PM, 2PM, PASS, FOUL, etc...
# Returns a dictionary containing all the players, with a key indicating whether
# they are on the home or away team.
# input: the frames dataframe we read from one of the csv files
def get_players(frames_dataframe, home, away):
"""
# Return a list of unique players for a given team (home or away)
def get_unique_players(frames_dataframe, places):
players = []
# Looks through each distance column (hp1, hp2, ap4, etc...), and gets a list
# of unique players in that column.
# Looking through all columns for a team gives us all players on the team.
for distance in places:
for i in frames_dataframe[distance].unique().tolist():
if not(math.isnan(i)):
players.append(i)
return players
"""
# print(home)
# print(away)
players = dict()
for p_id in frames_dataframe.player_id:
players[p_id] = {
"team": home
if frames_dataframe.loc[frames_dataframe.player_id == p_id, "team"].iloc[0]
== "home"
else away
}
return players
"""
home_player_ids = get_unique_players(frames_dataframe, ['hp1', 'hp2', 'hp3', 'hp4', 'hp5'])
away_player_ids = get_unique_players(frames_dataframe, ['ap1', 'ap2', 'ap3', 'ap4', 'ap5'])
# Initialize the player dictionary.
# Main key is player id. Holds all information on each player.
players = {player_id: dict() for player_id in (home_player_ids + away_player_ids)}
for player_id, data in players.items():
if player_id in home_player_ids:
data["team"] = "home"
else:
data["team"] = "away"
return players
"""
def get_player_info(players, players_dataframe, attribute):
for player_id, data in players.items():
players[player_id][attribute] = players_dataframe.loc[
players_dataframe.ids_id == player_id, attribute
].iloc[0]
def get_event_info(players, game_markings_dataframe, event):
for player_id, data in players.items():
players[player_id][event] = len(
game_markings_dataframe.loc[
(game_markings_dataframe.player_id == player_id)
& (game_markings_dataframe.event == event)
]
)
# Uses the game/player information gathered above to get the number of points made
# by each team during actual gameplay. Since there is no information about free
# throws made from foul plays, we cannot actually determine the final score or
# which team won.
def get_scoreboard(game_df, home, away):
home_score = 0
away_score = 0
for i in range(len(game_df["2PM"])):
if game_df.iloc[i]["team"] == home:
home_score += game_df["2PM"].iloc[i] * 2
if game_df.iloc[i]["team"] == away:
away_score += game_df["2PM"].iloc[i] * 2
for i in range(len(game_df["3PM"])):
if game_df.iloc[i]["team"] == home:
home_score += game_df["3PM"].iloc[i] * 3
if game_df.iloc[i]["team"] == away:
away_score += game_df["3PM"].iloc[i] * 3
return home_score, away_score
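# Illustrative sketch (not part of the original script): how get_scoreboard reads a
# per-player table with "team", "2PM" and "3PM" columns; the team codes and counts
# below are made-up values.
def _demo_get_scoreboard():
    demo_df = pd.DataFrame({
        "team": ["LAL", "LAL", "BOS"],
        "2PM": [3, 1, 4],
        "3PM": [1, 0, 2],
    })
    # home scores 3*2 + 1*2 + 1*3 = 11, away scores 4*2 + 2*3 = 14
    return get_scoreboard(demo_df, home="LAL", away="BOS")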
def get_game_names(game_file_names):
game_names = []
for game_file_name in game_file_names:
game_file_name_arr = str(game_file_name).split("/")
game_names.append(game_file_name_arr[-1])
return game_names
def squash(game_dataframe, home, away, home_score, away_score):
game = dict()
game["home"] = home
game["home_points"] = home_score
game["away"] = away
game["away_points"] = away_score
# dataframe_dict = game_dataframe.to_dict("index"))
team_stats = {key: dict() for key in ["home", "away"]}
for player_id, player_stats in game_dataframe.to_dict("index").items():
if player_stats["team"] == home:
if player_stats["pos_name"] not in team_stats["home"]:
team_stats["home"][player_stats["pos_name"]] = dict()
team_stats["home"][player_stats["pos_name"]]["count"] = 0
team_stats["home"][player_stats["pos_name"]]["2PM"] = 0
team_stats["home"][player_stats["pos_name"]]["2PX"] = 0
team_stats["home"][player_stats["pos_name"]]["3PM"] = 0
team_stats["home"][player_stats["pos_name"]]["3PX"] = 0
team_stats["home"][player_stats["pos_name"]]["PASS"] = 0
team_stats["home"][player_stats["pos_name"]]["POSS"] = 0
team_stats["home"][player_stats["pos_name"]]["TO"] = 0
team_stats["home"][player_stats["pos_name"]]["count"] += 1
team_stats["home"][player_stats["pos_name"]]["2PM"] += player_stats["2PM"]
team_stats["home"][player_stats["pos_name"]]["2PX"] += player_stats["2PX"]
team_stats["home"][player_stats["pos_name"]]["3PM"] += player_stats["3PM"]
team_stats["home"][player_stats["pos_name"]]["3PX"] += player_stats["3PX"]
team_stats["home"][player_stats["pos_name"]]["PASS"] += player_stats["PASS"]
team_stats["home"][player_stats["pos_name"]]["POSS"] += player_stats["POSS"]
team_stats["home"][player_stats["pos_name"]]["TO"] += player_stats["TO"]
else:
if player_stats["pos_name"] not in team_stats["away"]:
team_stats["away"][player_stats["pos_name"]] = dict()
team_stats["away"][player_stats["pos_name"]]["count"] = 0
team_stats["away"][player_stats["pos_name"]]["2PM"] = 0
team_stats["away"][player_stats["pos_name"]]["2PX"] = 0
team_stats["away"][player_stats["pos_name"]]["3PM"] = 0
team_stats["away"][player_stats["pos_name"]]["3PX"] = 0
team_stats["away"][player_stats["pos_name"]]["PASS"] = 0
team_stats["away"][player_stats["pos_name"]]["POSS"] = 0
team_stats["away"][player_stats["pos_name"]]["TO"] = 0
team_stats["away"][player_stats["pos_name"]]["count"] += 1
team_stats["away"][player_stats["pos_name"]]["2PM"] += player_stats["2PM"]
team_stats["away"][player_stats["pos_name"]]["2PX"] += player_stats["2PX"]
team_stats["away"][player_stats["pos_name"]]["3PM"] += player_stats["3PM"]
team_stats["away"][player_stats["pos_name"]]["3PX"] += player_stats["3PX"]
team_stats["away"][player_stats["pos_name"]]["PASS"] += player_stats["PASS"]
team_stats["away"][player_stats["pos_name"]]["POSS"] += player_stats["POSS"]
team_stats["away"][player_stats["pos_name"]]["TO"] += player_stats["TO"]
for team, team_info in team_stats.items():
for position, position_stats in team_info.items():
position_stats["2PM"] /= position_stats["count"]
position_stats["2PX"] /= position_stats["count"]
position_stats["3PM"] /= position_stats["count"]
position_stats["3PX"] /= position_stats["count"]
position_stats["PASS"] /= position_stats["count"]
position_stats["POSS"] /= position_stats["count"]
position_stats["TO"] /= position_stats["count"]
del position_stats["count"]
for team, team_info in team_stats.items():
for position, position_stats in team_info.items():
for stat in list(position_stats):
game[team + "_" + position + "_" + stat] = position_stats[stat]
return game
def create_dataset():
players_dataframe = pd.read_csv("meta/players.csv")
games_dataframe = pd.read_csv("meta/games.csv")
teams_dataframe = pd.read_csv("meta/teams.csv")
file_names = get_game_names(list((Path.cwd() / "simple-markings").glob("*.csv")))
game_info = dict()
for file_name in file_names:
if os.path.isfile("simple-frames/" + file_name):
file_name_array = file_name.split("-")
away, home = file_name_array[-2], file_name_array[-1][:3]
frames_dataframe = pd.read_csv("simple-frames/" + file_name)
game_markings_dataframe = pd.read_csv("simple-markings/" + file_name)
players = get_players(frames_dataframe, home, away)
get_player_info(players, players_dataframe, "pos_name")
get_event_info(players, game_markings_dataframe, "PASS")
get_event_info(players, game_markings_dataframe, "2PM")
get_event_info(players, game_markings_dataframe, "2PX")
get_event_info(players, game_markings_dataframe, "3PM")
get_event_info(players, game_markings_dataframe, "3PX")
get_event_info(players, game_markings_dataframe, "POSS")
get_event_info(players, game_markings_dataframe, "TO")
game_dataframe = pd.DataFrame(players).transpose()
home_score, away_score = get_scoreboard(game_dataframe, home, away)
game_info[file_name] = squash(
game_dataframe, home, away, home_score, away_score
)
game_info_dataframe = pd.DataFrame(game_info).transpose()
print(game_info_dataframe)
game_info_dataframe.to_csv(
"all_game_info.csv", sep=",", encoding="utf-8", index_label="game_name"
)
def learn():
def calculate_loss(submission_dict):
for game_index, game_stats in submission_dict.items():
total_predicted_points = (
game_stats["home_points_predicted"]
+ game_stats["away_points_predicted"]
)
total_actual_points = (
game_stats["home_points_actual"] - game_stats["away_points_actual"]
)
# game_stats["percent_accuracy"] = (total_actual_points - loss) / total_actual_points * 100
game_stats["loss"] = abs(total_predicted_points - total_actual_points)
atts_to_exclude = ["home", "away", "game_name"]
full_df = pd.read_csv("all_game_info.csv") # grab the entire csv
for (
col
) in (
full_df.columns.values
): # we exclude all of the point making ones because that is too easy
if "PM" in col:
atts_to_exclude.append(col)
train_df = full_df.sample(
frac=0.8, random_state=200
) # split into training and testing
test_df = full_df.drop(train_df.index)
x_test = test_df.drop(["away_points", "home_points"] + atts_to_exclude, axis=1)
x_train = train_df.drop(["away_points", "home_points"] + atts_to_exclude, axis=1)
y_train_h = train_df["home_points"]
y_train_a = train_df["away_points"]
classifier_h = xgb.XGBClassifier(n_estimators=300, n_jobs=4, silent=1, eta=0.1)
classifier_a = xgb.XGBClassifier(n_estimators=300, n_jobs=4, silent=1, eta=0.1)
classifier_h.fit(x_train, y_train_h)
classifier_a.fit(x_train, y_train_a)
pred_h = classifier_h.predict(x_test)
pred_a = classifier_a.predict(x_test)
submission = pd.DataFrame(
{
"game_name": test_df.game_name,
"home_points_predicted": pred_h,
"away_points_predicted": pred_a,
"home_points_actual": test_df.home_points,
"away_points_actual": test_df.away_points,
}
)
submission_dict = submission.to_dict("index")
# print(submission_dict)
calculate_loss(submission_dict)
submission = | pd.DataFrame(submission_dict) | pandas.DataFrame |
import pandas as pd
def list_platform_metadata_s4():
s4_dict = {
'COSPAR': '1998-017A',
'NORAD': 25260,
'full_name': 'Satellite Pour l’Observation de la Terre',
'instruments': {'Végétation', 'HRVIR', 'DORIS'},
'constellation': 'SPOT',
'launch': '1998-03-24',
'orbit': 'sso',
'equatorial_crossing_time': '10:30'}
return s4_dict
def list_central_wavelength_s4():
""" create dataframe with metadata about SPOT4
Returns
-------
    df : dataframe
        metadata and general multispectral information about the HRVIR
        instrument that is onboard SPOT4, having the following columns:
        * wavelength : central wavelength of the band
        * bandwidth : extent of the spectral sensitivity
* bandid : number for identification in the meta data
* resolution : spatial resolution of a pixel
* name : general name of the band, if applicable
Example
-------
make a selection by name:
    >>> boi = ['red', 'green', 'near infrared']
    >>> s4_df = list_central_wavelength_s4()
    >>> s4_df = s4_df[s4_df['name'].isin(boi)]
    >>> s4_df
         wavelength  bandwidth  resolution           name  bandid
    B01         545         90          20          green       0
    B02         640         70          20            red       1
    B03         740        100          20  near infrared       2
"""
wavelength = {"B01": 545, "B02": 640, "B03": 740, "B04": 1665,
}
bandwidth = {"B01": 90, "B02": 70, "B03": 100, "B04": 170,
}
bandid = {"B01": 0, "B02": 1, "B03": 2, "B04": 3,
}
resolution = {"B01": 20, "B02": 20, "B03": 20, "B04": 20,
}
name = {"B01": 'green', "B02" : 'red', "B03" : 'near infrared',
"B04" : 'shortwave infrared',
}
d = {
"wavelength": pd.Series(wavelength),
"bandwidth": pd.Series(bandwidth),
"resolution": pd.Series(resolution),
"name": pd.Series(name),
"bandid": pd.Series(bandid)
}
df = | pd.DataFrame(d) | pandas.DataFrame |
__author__ = 'lucabasa'
__version__ = '1.0'
__status__ = 'development'
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, RandomizedSearchCV
import lightgbm as lgb
import xgboost as xgb
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
def lightgbm_train(train, test, target, kfolds):
param = {'num_leaves': 111,
'min_data_in_leaf': 150,
'objective': 'regression',
'max_depth': 9,
'learning_rate': 0.005,
"boosting": "gbdt",
"feature_fraction": 0.7522,
"bagging_freq": 1,
"bagging_fraction": 0.7083 ,
"bagging_seed": 11,
"metric": 'rmse',
"lambda_l1": 0.2634,
"random_seed": 133,
"verbosity": -1}
'''
param = {'num_leaves': 50,
'min_data_in_leaf': 11,
'objective': 'regression',
'max_depth': 5,
'learning_rate': 0.005,
"boosting": "gbdt",
"feature_fraction": 0.8791,
"bagging_freq": 1,
"bagging_fraction": 0.9238 ,
"bagging_seed": 11,
"metric": 'rmse',
"lambda_l1": 4.8679,
"random_seed": 133,
"verbosity": -1}
'''
oof = np.zeros(len(train))
predictions = np.zeros(len(test))
feature_importance_df = pd.DataFrame()
comm_cols = list(set(train.columns).intersection(test.columns))
for fold_, (trn_idx, val_idx) in enumerate(kfolds.split(train.values, target.values)):
print("fold n°{}".format(fold_))
trn_data = lgb.Dataset(train.iloc[trn_idx][comm_cols],
label=target.iloc[trn_idx]
)
val_data = lgb.Dataset(train.iloc[val_idx][comm_cols],
label=target.iloc[val_idx]
)
num_round = 10000
clf = lgb.train(param,
trn_data,
num_round,
valid_sets = [trn_data, val_data],
verbose_eval=500,
early_stopping_rounds = 300)
oof[val_idx] = clf.predict(train.iloc[val_idx][comm_cols], num_iteration=clf.best_iteration)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = comm_cols
fold_importance_df["importance"] = clf.feature_importance()
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
predictions += clf.predict(test[comm_cols], num_iteration=clf.best_iteration) / kfolds.n_splits
print("CV score: {:<8.5f}".format(mean_squared_error(oof, target)**0.5))
return predictions, mean_squared_error(oof, target)**0.5, feature_importance_df, oof
def xgb_train(train, test, target, kfolds):
oof = np.zeros(len(train))
predictions = np.zeros(len(test))
feature_importance_df = pd.DataFrame()
comm_cols = list(set(train.columns).intersection(test.columns))
for fold_, (trn_idx, val_idx) in enumerate(kfolds.split(train.values, target.values)):
print("fold n°{}".format(fold_))
trn_data = train.iloc[trn_idx][comm_cols]
val_data = train.iloc[val_idx][comm_cols]
trn_target = target.iloc[trn_idx]
val_target = target.iloc[val_idx]
clf = xgb.XGBRegressor(n_estimators=10000,
learning_rate=0.05,
max_depth=6,
n_jobs=6,
subsample=0.99,
random_state=408,
gamma=0.0217,
reg_alpha=0.9411,
colsample_bytree=0.3055).fit(trn_data, trn_target,
eval_set=[(val_data, val_target)],
eval_metric='rmse',
early_stopping_rounds=200,
verbose=500)
oof[val_idx] = clf.predict(train.iloc[val_idx][comm_cols],
ntree_limit=clf.best_iteration)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = comm_cols
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
predictions += clf.predict(test[comm_cols], ntree_limit=clf.best_iteration) / kfolds.n_splits
print("CV score: {:<8.5f}".format(mean_squared_error(oof, target)**0.5))
return predictions, mean_squared_error(oof, target)**0.5, feature_importance_df, oof
def rf_train(train, test, target, kfolds):
oof = np.zeros(len(train))
predictions = np.zeros(len(test))
feature_importance_df = pd.DataFrame()
comm_cols = list(set(train.columns).intersection(test.columns))
'''
grid_param = {'max_depth': np.arange(3,30),
'min_samples_split': np.arange(2, 50),
'min_samples_leaf': np.arange(1,40),
'max_features': ['sqrt', 'log2', None]}
print('Optimizing parameters')
grid = RandomizedSearchCV(RandomForestRegressor(n_estimators=300, n_jobs=4, random_state=345),
param_distributions=grid_param, n_iter=20, cv=kfolds,
random_state=654, n_jobs=-1, scoring='neg_mean_squared_error', verbose=3)
grid.fit(train[comm_cols], target)
best_forest = grid.best_estimator_
print(grid.best_params_)
print(round( (-grid.best_score_ )**0.5 ,3))
'''
best_forest = RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=32, max_depth=20, max_features='sqrt')
for fold_, (trn_idx, val_idx) in enumerate(kfolds.split(train.values, target.values)):
print("fold n°{}".format(fold_))
trn_data = train.iloc[trn_idx][comm_cols]
val_data = train.iloc[val_idx][comm_cols]
trn_target = target.iloc[trn_idx]
val_target = target.iloc[val_idx]
clf = best_forest.fit(trn_data, trn_target)
oof[val_idx] = clf.predict(train.iloc[val_idx][comm_cols])
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = comm_cols
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = | pd.concat([feature_importance_df, fold_importance_df], axis=0) | pandas.concat |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from bagging import Bagging
from sklearn import svm
from sklearn import preprocessing
import random
from keras.utils import to_categorical
from opts import DLOption
from dbn_tf import DBN
from nn_tf import NN
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from imblearn.over_sampling import SMOTE
from fsvm import fsvmClass
def RepetitionRandomSampling(data, number, rate): # sampling with replacement; number is the number of samples to draw
sample = []
for i in range(int(rate * number)):
sample.append(data[random.randint(0, len(data) - 1)])
return sample
def Voting(data): # majority voting
    term = np.transpose(data) # transpose
    result = list() # store the results
for target in term:
one = list(target).count(1)
zero = list(target).count(0)
if one > zero:
result.append(1)
else:
result.append(0)
return result
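# Illustrative sketch (not part of the original script): majority voting over three
# classifiers' 0/1 predictions for four samples; the prediction lists are made up.
def _demo_voting():
    predictions = [
        [1, 0, 1, 0],  # classifier 1
        [1, 1, 0, 0],  # classifier 2
        [1, 0, 1, 1],  # classifier 3
    ]
    # ties fall back to 0 because of the strict '>' in Voting
    return Voting(predictions)  # -> [1, 0, 1, 0]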
# downsampling: balance the classes by keeping only a subset of the majority class
def lowSampling(df, percent=3/3):
    data1 = df[df[0] == 1] # put the majority-class samples in data1
    data0 = df[df[0] == 0] # put the minority-class samples in data0
    index = np.random.randint(
        len(data1), size=int(percent * (len(df) - len(data1)))) # randomly pick the indices of the majority-class rows to keep
    lower_data1 = data1.iloc[list(index)] # downsample
return( | pd.concat([lower_data1, data0]) | pandas.concat |
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import numpy as np
import os
import pandas as pd
import pydash as ps
import shutil
DATA_AGG_FNS = {
't': 'sum',
'reward': 'sum',
'loss': 'mean',
'explore_var': 'mean',
}
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with any reward mean
FITNESS_STD = util.read('slm_lab/spec/_fitness_std.json')
NOISE_WINDOW = 0.05
MA_WINDOW = 100
logger = logger.get_logger(__name__)
'''
Fitness analysis
'''
def calc_strength(aeb_df, rand_epi_reward, std_epi_reward):
'''
For each episode, use the total rewards to calculate the strength as
strength_epi = (reward_epi - reward_rand) / (reward_std - reward_rand)
**Properties:**
- random agent has strength 0, standard agent has strength 1.
    - if an agent achieves x2 rewards, the strength is ~x2, and so on.
    - the strength of a learning agent always tends toward positive regardless of the sign of rewards (some environments use negative rewards)
    - the scale of strength is always standard at 1 and its multiples, regardless of the scale of actual rewards. Strength stays invariant even as reward gets rescaled.
This allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
'''
    # use lower clip 0 because noise in the reward can dip slightly below rand
return (aeb_df['reward'] - rand_epi_reward).clip(0.) / (std_epi_reward - rand_epi_reward)
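# Illustrative sketch (not part of the original module): the strength formula on toy
# episode rewards; the random/standard reward levels below are made-up numbers.
def _demo_strength():
    toy_df = pd.DataFrame({'reward': [0., 50., 100., 200.]})
    # random-agent level -> 0, standard-agent level -> 1, double the rewards -> 2
    return calc_strength(toy_df, rand_epi_reward=0., std_epi_reward=100.)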
def calc_stable_idx(aeb_df, min_strength_ma):
'''Calculate the index (epi) when strength first becomes stable (using moving mean and working backward)'''
above_std_strength_sr = (aeb_df['strength_ma'] >= min_strength_ma)
if above_std_strength_sr.any():
        # if the moving-average strength reached min_strength_ma at some point, find the first index where it did
std_strength_ra_idx = above_std_strength_sr.idxmax()
stable_idx = std_strength_ra_idx - (MA_WINDOW - 1)
else:
stable_idx = np.nan
return stable_idx
def calc_std_strength_timestep(aeb_df):
'''
Calculate the timestep needed to achieve stable (within NOISE_WINDOW) std_strength.
    For an agent that fails to achieve std_strength 1, it is meaningless to measure speed or to give a false interpolation, so the timestep is set as inf (never).
'''
std_strength = 1.
stable_idx = calc_stable_idx(aeb_df, min_strength_ma=std_strength - NOISE_WINDOW)
if np.isnan(stable_idx):
std_strength_timestep = np.inf
else:
std_strength_timestep = aeb_df.loc[stable_idx, 'total_t'] / std_strength
return std_strength_timestep
def calc_speed(aeb_df, std_timestep):
'''
For each session, measure the moving average for strength with interval = 100 episodes.
    Next, measure the total timesteps up to the first episode that surpasses standard strength, allowing for noise of 0.05.
Finally, calculate speed as
speed = timestep_std / timestep_solved
**Properties:**
- random agent has speed 0, standard agent has speed 1.
- if an agent takes x2 timesteps to exceed standard strength, we can say it is 2x slower.
    - the speed of a learning agent always tends toward positive regardless of the shape of the rewards curve
    - the scale of speed is always standard at 1 and its multiples, regardless of the absolute timesteps.
    For an agent that fails to achieve standard strength 1, it is meaningless to measure speed or to give a false interpolation, so the speed is 0.
This allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
'''
agent_timestep = calc_std_strength_timestep(aeb_df)
speed = std_timestep / agent_timestep
return speed
def is_noisy_mono_inc(sr):
'''Check if sr is monotonically increasing, (given NOISE_WINDOW = 5%) within noise = 5% * std_strength = 0.05 * 1'''
zero_noise = -NOISE_WINDOW
mono_inc_sr = np.diff(sr) >= zero_noise
# restore sr to same length
mono_inc_sr = np.insert(mono_inc_sr, 0, np.nan)
return mono_inc_sr
def calc_stability(aeb_df):
'''
Find a baseline =
- 0. + noise for very weak solution
    - max(strength_ma_epi) - noise for a partial (weak) solution
- 1. - noise for solution achieving standard strength and beyond
So we get:
- weak_baseline = 0. + noise
- strong_baseline = min(max(strength_ma_epi), 1.) - noise
- baseline = max(weak_baseline, strong_baseline)
Let epi_baseline be the episode where baseline is first attained. Consider the episodes starting from epi_baseline, let #epi_+ be the number of episodes, and #epi_>= the number of episodes where strength_ma_epi is monotonically increasing.
Calculate stability as
stability = #epi_>= / #epi_+
**Properties:**
- stable agent has value 1, unstable agent < 1, and non-solution = 0.
    - allows for drops in strength MA of 5% to account for noise, which is invariant to the scale of rewards
- if strength is monotonically increasing (with 5% noise), then it is stable
- sharp gain in strength is considered stable
- monotonically increasing implies strength can keep growing and as long as it does not fall much, it is considered stable
'''
weak_baseline = 0. + NOISE_WINDOW
strong_baseline = min(aeb_df['strength_ma'].max(), 1.) - NOISE_WINDOW
baseline = max(weak_baseline, strong_baseline)
stable_idx = calc_stable_idx(aeb_df, min_strength_ma=baseline)
if np.isnan(stable_idx):
stability = 0.
else:
stable_df = aeb_df.loc[stable_idx:, 'strength_mono_inc']
stability = stable_df.sum() / len(stable_df)
return stability
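# Illustrative sketch (not part of the original module): a simplified view of the
# stability ratio on a made-up moving-average strength curve, ignoring the MA_WINDOW
# and baseline bookkeeping done above.
def _demo_stability_ratio():
    strength_ma = np.array([0.2, 0.4, 0.38, 0.6, 0.8, 0.5, 0.9])
    mono_inc = np.diff(strength_ma) >= -NOISE_WINDOW  # tolerate 5% noise dips
    return mono_inc.sum() / len(mono_inc)  # fraction of non-dropping steps, here 5/6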
def calc_consistency(aeb_fitness_df):
'''
    Calculate the consistency of a trial by the fitness_vectors of its sessions:
consistency = ratio of non-outlier vectors
**Properties:**
- outliers are calculated using MAD modified z-score
- if all the fitness vectors are zero or all strength are zero, consistency = 0
- works for all sorts of session fitness vectors, with the standard scale
    When an agent fails to achieve standard strength, it is meaningless to measure consistency or to give a false interpolation, so consistency is 0.
'''
fitness_vecs = aeb_fitness_df.values
if ~np.any(fitness_vecs) or ~np.any(aeb_fitness_df['strength']):
# no consistency if vectors all 0
consistency = 0.
elif len(fitness_vecs) == 2:
# if only has 2 vectors, check norm_diff
diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(len(fitness_vecs[0])))
consistency = diff_norm <= NOISE_WINDOW
else:
is_outlier_arr = util.is_outlier(fitness_vecs)
consistency = (~is_outlier_arr).sum() / len(is_outlier_arr)
return consistency
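# Illustrative sketch (not part of the original module): with exactly two sessions the
# consistency check reduces to comparing the normalized fitness-vector difference
# against NOISE_WINDOW; the vectors below are made up.
def _demo_consistency_two_sessions():
    fitness_vecs = np.array([[0.9, 1.1, 0.8], [0.92, 1.08, 0.81]])
    diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(3))
    return diff_norm <= NOISE_WINDOW  # -> True, the two sessions agree within noise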
def calc_epi_reward_ma(aeb_df):
'''Calculates the episode reward moving average with the MA_WINDOW'''
rewards = aeb_df['reward']
aeb_df['reward_ma'] = rewards.rolling(window=MA_WINDOW, min_periods=0, center=False).mean()
return aeb_df
def calc_fitness(fitness_vec):
'''
    Takes a vector of qualifying standardized dimensions of fitness and computes the normalized length as fitness.
    The L2 norm is used because it diminishes lower values but amplifies higher values for comparison.
'''
if isinstance(fitness_vec, pd.Series):
fitness_vec = fitness_vec.values
elif isinstance(fitness_vec, pd.DataFrame):
fitness_vec = fitness_vec.iloc[0].values
std_fitness_vector = np.ones(len(fitness_vec))
fitness = np.linalg.norm(fitness_vec) / np.linalg.norm(std_fitness_vector)
return fitness
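# Illustrative sketch (not part of the original module): a standard fitness vector of
# ones has fitness 1 by construction, and stronger-than-standard vectors exceed 1.
def _demo_fitness():
    return calc_fitness(np.array([1., 1., 1., 1.])), calc_fitness(np.array([2., 1., 1., 1.]))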
def calc_aeb_fitness_sr(aeb_df, env_name):
'''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
no_fitness_sr = pd.Series({
'strength': 0., 'speed': 0., 'stability': 0.})
if len(aeb_df) < MA_WINDOW:
logger.warn(f'Run more than {MA_WINDOW} episodes to compute proper fitness')
return no_fitness_sr
std = FITNESS_STD.get(env_name)
if std is None:
std = FITNESS_STD.get('template')
logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
aeb_df['total_t'] = aeb_df['t'].cumsum()
aeb_df['strength'] = calc_strength(aeb_df, std['rand_epi_reward'], std['std_epi_reward'])
aeb_df['strength_ma'] = aeb_df['strength'].rolling(MA_WINDOW).mean()
aeb_df['strength_mono_inc'] = is_noisy_mono_inc(aeb_df['strength']).astype(int)
strength = aeb_df['strength_ma'].max()
speed = calc_speed(aeb_df, std['std_timestep'])
stability = calc_stability(aeb_df)
aeb_fitness_sr = pd.Series({
'strength': strength, 'speed': speed, 'stability': stability})
return aeb_fitness_sr
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
'''Save spec to proper path. Called at Experiment or Trial init.'''
prepath = util.get_prepath(spec, info_space, unit)
util.write(spec, f'{prepath}_spec.json')
def calc_mean_fitness(fitness_df):
    '''Method to calculate the mean over all bodies for a fitness_df'''
return fitness_df.mean(axis=1, level=3)
def get_session_data(session):
'''
Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate.
    @returns {dict} session_data
'''
session_data = {}
for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
session_data[aeb] = body.df.copy()
return session_data
def calc_session_fitness_df(session, session_data):
'''Calculate the session fitness df'''
session_fitness_data = {}
for aeb in session_data:
aeb_df = session_data[aeb]
aeb_df = calc_epi_reward_ma(aeb_df)
util.downcast_float32(aeb_df)
body = session.aeb_space.body_space.data[aeb]
aeb_fitness_sr = calc_aeb_fitness_sr(aeb_df, body.env.name)
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[session.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS[:3], axis=1)
session_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
session_fitness_df = pd.concat(session_fitness_data, axis=1)
mean_fitness_df = calc_mean_fitness(session_fitness_df)
session_fitness = calc_fitness(mean_fitness_df)
logger.info(f'Session mean fitness: {session_fitness}\n{mean_fitness_df}')
return session_fitness_df
def calc_trial_fitness_df(trial):
'''
Calculate the trial fitness df by aggregating from the collected session_data_dict (session_fitness_df's).
Adds a consistency dimension to fitness vector.
'''
trial_fitness_data = {}
try:
all_session_fitness_df = pd.concat(list(trial.session_data_dict.values()))
except ValueError as e:
logger.exception('Sessions failed, no data to analyze. Check stack trace above')
for aeb in util.get_df_aeb_list(all_session_fitness_df):
aeb_fitness_df = all_session_fitness_df.loc[:, aeb]
aeb_fitness_sr = aeb_fitness_df.mean()
consistency = calc_consistency(aeb_fitness_df)
aeb_fitness_sr = aeb_fitness_sr.append(pd.Series({'consistency': consistency}))
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[trial.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS, axis=1)
trial_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
trial_fitness_df = pd.concat(trial_fitness_data, axis=1)
mean_fitness_df = calc_mean_fitness(trial_fitness_df)
trial_fitness_df = mean_fitness_df
trial_fitness = calc_fitness(mean_fitness_df)
logger.info(f'Trial mean fitness: {trial_fitness}\n{mean_fitness_df}')
return trial_fitness_df
def is_unfit(fitness_df):
    '''Check if a fitness_df is unfit. Used to determine if the trial should stop running more sessions'''
# TODO improve to make it work with any reward mean
mean_fitness_df = calc_mean_fitness(fitness_df)
return mean_fitness_df['strength'].iloc[0] < NOISE_WINDOW
def plot_session(session_spec, info_space, session_data):
'''Plot the session graph, 2 panes: reward, loss & explore_var. Each aeb_df gets its own color'''
graph_x = session_spec['meta'].get('graph_x', 'epi')
aeb_count = len(session_data)
palette = viz.get_palette(aeb_count)
fig = viz.tools.make_subplots(rows=3, cols=1, shared_xaxes=True)
for idx, (a, e, b) in enumerate(session_data):
aeb_str = f'{a}{e}{b}'
aeb_df = session_data[(a, e, b)]
aeb_df.fillna(0, inplace=True) # for saving plot, cant have nan
fig_1 = viz.plot_line(aeb_df, 'reward', graph_x, legend_name=aeb_str, draw=False, trace_kwargs={'legendgroup': aeb_str, 'line': {'color': palette[idx]}})
fig.append_trace(fig_1.data[0], 1, 1)
fig_2 = viz.plot_line(aeb_df, ['loss'], graph_x, y2_col=['explore_var'], trace_kwargs={'legendgroup': aeb_str, 'showlegend': False, 'line': {'color': palette[idx]}}, draw=False)
fig.append_trace(fig_2.data[0], 2, 1)
fig.append_trace(fig_2.data[1], 3, 1)
fig.layout['xaxis1'].update(title=graph_x, zerolinewidth=1)
fig.layout['yaxis1'].update(fig_1.layout['yaxis'])
fig.layout['yaxis1'].update(domain=[0.55, 1])
fig.layout['yaxis2'].update(fig_2.layout['yaxis'])
fig.layout['yaxis2'].update(showgrid=False, domain=[0, 0.45])
fig.layout['yaxis3'].update(fig_2.layout['yaxis2'])
fig.layout['yaxis3'].update(overlaying='y2', anchor='x2')
fig.layout.update(ps.pick(fig_1.layout, ['legend']))
fig.layout.update(title=f'session graph: {session_spec["name"]} t{info_space.get("trial")} s{info_space.get("session")}', width=500, height=600)
viz.plot(fig)
return fig
def gather_aeb_rewards_df(aeb, session_datas):
'''Gather rewards from each session for a body into a df'''
aeb_session_rewards = {}
for s, session_data in session_datas.items():
aeb_df = session_data[aeb]
aeb_reward_sr = aeb_df['reward']
aeb_session_rewards[s] = aeb_reward_sr
aeb_rewards_df = pd.DataFrame(aeb_session_rewards)
return aeb_rewards_df
def build_aeb_reward_fig(aeb_rewards_df, aeb_str, color):
'''Build the aeb_reward envelope figure'''
    # TODO: need to enable total_t for the trial graph, and line up signals at the common total_t
mean_sr = aeb_rewards_df.mean(axis=1)
std_sr = aeb_rewards_df.std(axis=1).fillna(0)
max_sr = mean_sr + std_sr
min_sr = mean_sr - std_sr
x = aeb_rewards_df.index.tolist()
max_y = max_sr.tolist()
min_y = min_sr.tolist()
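    # trace the upper bound forward and the lower bound backward so the mean±std
    # envelope forms a single closed polygon around the reward curve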
envelope_trace = viz.go.Scatter(
x=x + x[::-1],
y=max_y + min_y[::-1],
fill='tozerox',
fillcolor=viz.lower_opacity(color, 0.2),
line=dict(color='transparent'),
showlegend=False,
legendgroup=aeb_str,
)
df = | pd.DataFrame({'epi': x, 'mean_reward': mean_sr}) | pandas.DataFrame |
import logging
import os
from typing import List, Dict, Optional
import numpy as np
import pandas as pd
import shap
from sklearn.cluster import KMeans
from d3m import container, utils
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces import base
from d3m.primitive_interfaces.base import CallResult
from d3m.primitive_interfaces.supervised_learning import PrimitiveBase
from distil.modeling.forest import ForestCV
from distil.modeling.metrics import classification_metrics, regression_metrics
from distil.utils import CYTHON_DEP
import version
__all__ = ("EnsembleForest",)
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
metric = hyperparams.Enumeration[str](
values=classification_metrics + regression_metrics,
default="f1Macro",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The D3M scoring metric to use during the fit phase. This can be any of the regression, classification or "
+ "clustering metrics.",
)
shap_max_dataset_size = hyperparams.Hyperparameter[int](
default=1500,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The maximum dataset size on which to apply SHAP interpretation to each sample individually. Otherwise, this number of samples will be"
+ "drawn from the data distribution after clustering (to approximate the distribution) and interpretation will only be applied to these"
+ "samples",
)
n_estimators = hyperparams.UniformInt(
lower=1,
upper=2048,
default=32,
description="The number of trees in the forest.",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter",
"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter",
],
)
min_samples_leaf = hyperparams.UniformInt(
lower=1,
upper=31,
default=2,
description="Minimum number of samples to split leaf",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter",
"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter",
],
)
class_weight = hyperparams.Enumeration[str](
values=["None", "balanced", "balanced_subsample"],
default="None",
description="todo",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
)
estimator = hyperparams.Enumeration[str](
values=["ExtraTrees", "RandomForest"],
default="ExtraTrees",
description="todo",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
)
grid_search = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Runs an internal grid search to fit the primitive, ignoring caller supplied values for "
+ "n_estimators, min_samples_leaf, class_weight, estimator",
)
small_dataset_threshold = hyperparams.Hyperparameter[int](
default=2000,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, controls the application of the 'small_dataset_fits' and 'large_dataset_fits' "
+ "parameters - if the input dataset has fewer rows than the threshold value, 'small_dateset_fits' will be used when fitting. "
+ "Otherwise, 'num_large_fits' is used.",
)
small_dataset_fits = hyperparams.Hyperparameter[int](
default=5,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, the number of random forests to fit when using small datasets.",
)
large_dataset_fits = hyperparams.Hyperparameter[int](
default=1,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, the number of random forests to fit when using large datasets.",
)
compute_confidences = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Compute confidence values. Only valid when the task is classification.",
)
n_jobs = hyperparams.Hyperparameter[int](
default=64,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The value of the n_jobs parameter for the joblib library",
)
pos_label = hyperparams.Hyperparameter[Optional[str]](
default=None,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Name of the positive label in the binary case. If none is provided, second column is assumed to be positive",
)
class Params(params.Params):
model: ForestCV
target_cols: List[str]
label_map: Dict[int, str]
needs_fit: bool
binary: bool
input_hash: pd.Series
class EnsembleForestPrimitive(
PrimitiveBase[container.DataFrame, container.DataFrame, Params, Hyperparams]
):
"""
Generates an ensemble of random forests, with the number of internal models created controlled by the size of the
input dataframe. It accepts a dataframe as input, and returns a dataframe consisting of prediction values only as output.
Columns with string structural types are ignored.
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "e0ad06ce-b484-46b0-a478-c567e1ea7e02",
"version": version.__version__,
"name": "EnsembleForest",
"python_path": "d3m.primitives.learner.random_forest.DistilEnsembleForest",
"source": {
"name": "Distil",
"contact": "mailto:<EMAIL>",
"uris": [
"https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/ensemble_forest.py",
"https://github.com/uncharted-distil/distil-primitives",
],
},
"installation": [
CYTHON_DEP,
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
],
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.RANDOM_FOREST,
],
"primitive_family": metadata_base.PrimitiveFamily.LEARNER,
},
)
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
# hack to get around typing constraints.
if self.hyperparams["class_weight"] == "None":
class_weight = None
else:
class_weight = self.hyperparams["class_weight"]
grid_search = self.hyperparams["grid_search"]
if grid_search is True:
current_hyperparams = None
else:
current_hyperparams = {
"estimator": self.hyperparams["estimator"],
"n_estimators": self.hyperparams[
"n_estimators"
], # [32, 64, 128, 256, 512, 1024, 2048],
"min_samples_leaf": self.hyperparams[
"min_samples_leaf"
], # '[1, 2, 4, 8, 16, 32],
}
if self.hyperparams["metric"] in classification_metrics:
current_hyperparams.update({"class_weight": class_weight})
else: # regression
current_hyperparams.update({"bootstrap": True})
self._model = ForestCV(
self.hyperparams["metric"],
random_seed=self.random_seed,
hyperparams=current_hyperparams,
grid_search=grid_search,
n_jobs=self.hyperparams["n_jobs"],
)
self._needs_fit = True
self._label_map: Dict[int, str] = {}
self._target_cols: List[str] = []
self._binary = False
def _get_component_columns(
self, output_df: container.DataFrame, source_col_index: int
) -> List[int]:
        # Component columns are all columns whose source is the referenced
        # column index. This includes the referenced column index itself.
component_cols = [source_col_index]
# get the column name
col_name = output_df.metadata.query(
(metadata_base.ALL_ELEMENTS, source_col_index)
)["name"]
# get all columns which have this column as source
for c in range(0, len(output_df.columns)):
src = output_df.metadata.query((metadata_base.ALL_ELEMENTS, c))
if "source_column" in src and src["source_column"] == col_name:
component_cols.append(c)
return component_cols
def set_training_data(
self, *, inputs: container.DataFrame, outputs: container.DataFrame
) -> None:
# At this point anything that needed to be imputed should have been, so we'll
# clear out any remaining NaN values as a last measure.
        # if we are doing classification the outputs need to be integer classes;
        # the label map is used to convert these back on produce.
col = outputs.columns[0]
if self._model.mode == "classification":
factor = pd.factorize(outputs[col])
outputs = pd.DataFrame(factor[0], columns=[col])
self._label_map = {k: v for k, v in enumerate(factor[1])}
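            # pd.factorize maps the string labels to integer codes and returns the unique
            # labels (e.g. ['cat', 'dog', 'cat'] -> codes [0, 1, 0] with labels ['cat', 'dog']),
            # so _label_map becomes {0: 'cat', 1: 'dog'} and produce() can invert it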
self._target_cols = list(outputs.columns)
# remove nans from outputs, apply changes to inputs as well to ensure alignment
self._input_hash = pd.util.hash_pandas_object(inputs)
self._outputs = outputs[
outputs[col] != ""
].dropna() # not in place because we don't want to modify passed input
self._binary = self._outputs.iloc[:, 0].nunique(dropna=True) <= 2
row_diff = outputs.shape[0] - self._outputs.shape[0]
if row_diff != 0:
logger.warn(f"Removed {row_diff} rows due to NaN values in target data.")
self._inputs = inputs.loc[self._outputs.index, :]
else:
self._inputs = inputs
# same in other direction
inputs_rows = self._inputs.shape[0]
inputs_cols = self._inputs.shape[1]
self._inputs = self._inputs.select_dtypes(include="number")
col_diff = inputs_cols - self._inputs.shape[1]
if col_diff != 0:
logger.warn(f"Removed {col_diff} unencoded columns from training data.")
self._inputs = (
self._inputs.dropna()
) # not in place because because selection above doesn't create a copy
row_diff = inputs_rows - self._inputs.shape[0]
if row_diff != 0:
logger.warn(f"Removed {row_diff} rows due to NaN values in training data.")
self._outputs = self._outputs.loc[self._inputs.index, :]
self._model.num_fits = (
self.hyperparams["large_dataset_fits"]
if self._inputs.shape[0] > self.hyperparams["small_dataset_threshold"]
else self.hyperparams["small_dataset_fits"]
)
self._needs_fit = True
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
logger.debug(f"Fitting {__name__}")
if self._needs_fit:
self._model.fit(self._inputs.values, self._outputs.values)
self._needs_fit = False
return CallResult(None)
def produce(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None,
) -> CallResult[container.DataFrame]:
logger.debug(f"Producing {__name__}")
        # force a fit if it hasn't yet been done
if self._needs_fit:
self.fit()
        # drop all non-numeric columns
num_cols = inputs.shape[1]
inputs = inputs.select_dtypes(include="number")
col_diff = num_cols - inputs.shape[1]
if col_diff > 0:
logger.warn(f"Removed {col_diff} unencoded columns from produce data.")
# create dataframe to hold the result
result = self._model.predict(inputs.values)
if len(self._target_cols) > 1:
result_df = container.DataFrame()
for i, c in enumerate(self._target_cols):
col = container.DataFrame({c: result[:, i]})
result_df = pd.concat([result_df, col], axis=1)
for c in range(result_df.shape[1]):
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, c), "http://schema.org/Float"
)
else:
result_df = container.DataFrame(
{self._target_cols[0]: result}, generate_metadata=True
)
# if we mapped values earlier map them back.
if len(self._label_map) > 0:
# TODO label map will not work if there are multiple output columns.
result_df[self._target_cols[0]] = result_df[self._target_cols[0]].map(
self._label_map
)
# mark the semantic types on the dataframe
for i, _ in enumerate(result_df.columns):
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, i),
"https://metadata.datadrivendiscovery.org/types/PredictedTarget",
)
if (
self._model.mode == "classification"
and self.hyperparams["compute_confidences"]
):
confidence = self._model.predict_proba(inputs.values)
if self._binary:
pos_column = (
0 if self.hyperparams["pos_label"] == self._label_map[0] else 1
)
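                # the confidence column reports the predicted probability of the positive
                # class: column 0 if pos_label matches the first factorized label,
                # otherwise column 1 (the default when no pos_label is supplied)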
result_df.insert(
result_df.shape[1], "confidence", confidence[:, pos_column]
)
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"http://schema.org/Float",
)
else:
# add confidence scores as some metrics require them.
confidence = pd.Series(confidence.tolist(), name="confidence")
result_df = pd.concat([result_df, confidence], axis=1)
confidences = [
item
for sublist in result_df["confidence"].values.tolist()
for item in sublist
]
labels = np.array(list(self._label_map.values()) * len(result_df))
index = [
item
for sublist in [
[i] * len(np.unique(labels)) for i in result_df.index
]
for item in sublist
]
result_df_temp = container.DataFrame()
result_df_temp["Class"] = labels
result_df_temp["confidence"] = confidences
result_df_temp.metadata = result_df.metadata
result_df_temp["index_temp"] = index
result_df_temp = result_df_temp.set_index("index_temp")
result_df = result_df_temp
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"https://metadata.datadrivendiscovery.org/types/FloatVector",
)
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"https://metadata.datadrivendiscovery.org/types/Score",
)
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"https://metadata.datadrivendiscovery.org/types/PredictedTarget",
)
logger.debug(f"\n{result_df}")
return base.CallResult(result_df)
def produce_feature_importances(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None,
) -> CallResult[container.DataFrame]:
logger.debug(f"Producing {__name__} feature weights")
        # force a fit if it hasn't yet been done
if self._needs_fit:
self.fit()
# extract the feature weights
output = container.DataFrame(
self._model.feature_importances().reshape((1, len(inputs.columns))),
generate_metadata=True,
)
output.columns = inputs.columns
for i in range(len(inputs.columns)):
output.metadata = output.metadata.update_column(
i, {"name": output.columns[i]}
)
# map component columns back to their source - this would cover things like
# a one hot encoding column, that is derived from some original source column
source_col_importances: Dict[str, float] = {}
for col_idx in range(0, len(output.columns)):
col_dict = dict(
inputs.metadata.query((metadata_base.ALL_ELEMENTS, col_idx))
)
# if a column points back to a source column, add that columns importance to the
# total for that source column
if "source_column" in col_dict:
source_col = col_dict["source_column"]
if source_col not in source_col_importances:
source_col_importances[source_col] = 0.0
                source_col_importances[source_col] += output.iloc[0, col_idx]  # output has a single row
for source_col, importance in source_col_importances.items():
# add the source columns and their importances to the returned data
output_col_length = len(output.columns)
output.insert(output_col_length, source_col, importance, True)
output.metadata = output.metadata.update_column(
output_col_length, {"name": source_col}
)
return CallResult(output)
def produce_shap_values(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None,
) -> CallResult[container.DataFrame]:
if self._needs_fit:
self.fit()
        # don't want to produce SHAP values on the train set because it is too computationally intensive
check_rows = min(self._input_hash.shape[0], inputs.shape[0])
if (
pd.util.hash_pandas_object(inputs.head(check_rows))
== self._input_hash.head(check_rows)
).all():
logger.info(
"Not producing SHAP interpretations on train set because of computational considerations"
)
return CallResult(container.DataFrame([]))
# drop any non-numeric columns
num_cols = inputs.shape[1]
inputs = inputs.select_dtypes(include="number")
col_diff = num_cols - inputs.shape[1]
if col_diff > 0:
logger.warn(f"Removed {col_diff} unencoded columns.")
explainer = shap.TreeExplainer(self._model._models[0].model)
max_size = self.hyperparams["shap_max_dataset_size"]
if inputs.shape[0] > max_size:
logger.warning(
f"There are more than {max_size} rows in dataset, sub-sampling ~{max_size} approximately representative rows "
+ "on which to produce interpretations"
)
df = self._shap_sub_sample(inputs)
shap_values = explainer.shap_values(df)
else:
shap_values = explainer.shap_values(pd.DataFrame(inputs))
if self._model.mode == "classification":
logger.info(
f"Returning interpretability values offset from most frequent class in dataset"
)
shap_values = shap_values[np.argmax(explainer.expected_value)]
output_df = container.DataFrame(shap_values, generate_metadata=True)
for i, col in enumerate(inputs.columns):
output_df.metadata = output_df.metadata.update_column(i, {"name": col})
component_cols: Dict[str, List[int]] = {}
for c in range(0, len(output_df.columns)):
col_dict = dict(inputs.metadata.query((metadata_base.ALL_ELEMENTS, c)))
if "source_column" in col_dict:
src = col_dict["source_column"]
if src not in component_cols:
component_cols[src] = []
component_cols[src].append(c)
# build the source column values and add them to the output
for s, cc in component_cols.items():
src_col = output_df.iloc[:, cc].apply(lambda x: sum(x), axis=1)
src_col_index = len(output_df.columns)
output_df.insert(src_col_index, s, src_col)
output_df.metadata = output_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, src_col_index),
"https://metadata.datadrivendiscovery.org/types/Attribute",
)
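        # refresh the top-level column-dimension metadata so that it accounts
        # for the source columns that were just appended to the output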
df_dict = dict(output_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict_1 = dict(output_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict["dimension"] = df_dict_1
df_dict_1["name"] = "columns"
df_dict_1["semantic_types"] = (
"https://metadata.datadrivendiscovery.org/types/TabularColumn",
)
df_dict_1["length"] = len(output_df.columns)
output_df.metadata = output_df.metadata.update(
(metadata_base.ALL_ELEMENTS,), df_dict
)
return CallResult(output_df)
def _shap_sub_sample(self, inputs: container.DataFrame):
df = pd.DataFrame(inputs)
df["cluster_assignment"] = (
KMeans(random_state=self.random_seed).fit_predict(df).astype(int)
)
n_classes = df["cluster_assignment"].unique()
# deal with cases in which the predictions are all one class
if len(n_classes) == 1:
return df.sample(self.hyperparams["shap_max_dataset_size"]).drop(
columns=["cluster_assignment"]
)
else:
proportion = round(
self.hyperparams["shap_max_dataset_size"] / len(n_classes)
)
dfs = []
for i in n_classes:
# dealing with classes that have less than or equal to their proportional representation
if df[df["cluster_assignment"] == i].shape[0] <= proportion:
dfs.append(df[df["cluster_assignment"] == i])
else:
dfs.append(
df[df["cluster_assignment"] == i].sample(
proportion, random_state=self.random_seed
)
)
sub_sample_df = | pd.concat(dfs) | pandas.concat |
import numpy as np
import pytest
from pandas import DataFrame, Series, concat, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if roll_func != "sum":
result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)
result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)
tm.assert_almost_equal(result0, result1)
def test_nans_count():
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = obj.rolling(50, min_periods=30).count()
tm.assert_almost_equal(
result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()
)
@pytest.mark.parametrize(
"roll_func, kwargs",
[
["mean", {}],
["sum", {}],
["median", {}],
["min", {}],
["max", {}],
["std", {}],
["std", {"ddof": 0}],
["var", {}],
["var", {"ddof": 0}],
],
)
@pytest.mark.parametrize("minp", [0, 99, 100])
def test_min_periods(series, minp, roll_func, kwargs):
result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)(
**kwargs
)
expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)(
**kwargs
)
nan_mask = | isna(result) | pandas.isna |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pytest
import pandas
import modin.pandas as pd
from modin.pandas.utils import from_pandas
from .utils import df_equals
pd.DEFAULT_NPARTITIONS = 4
def generate_dfs():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
df2 = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col6": [12, 13, 14, 15],
"col7": [0, 0, 0, 0],
}
)
return df, df2
def generate_none_dfs():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, None, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [None, None, None, None],
}
)
df2 = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col6": [12, 13, 14, 15],
"col7": [0, 0, 0, 0],
}
)
return df, df2
def test_df_concat():
df, df2 = generate_dfs()
df_equals(pd.concat([df, df2]), pandas.concat([df, df2]))
def test_ray_concat():
df, df2 = generate_dfs()
modin_df, modin_df2 = from_pandas(df), from_pandas(df2)
df_equals(pd.concat([modin_df, modin_df2]), pandas.concat([df, df2]))
def test_ray_concat_with_series():
df, df2 = generate_dfs()
modin_df, modin_df2 = from_pandas(df), from_pandas(df2)
pandas_series = pandas.Series([1, 2, 3, 4], name="new_col")
df_equals(
pd.concat([modin_df, modin_df2, pandas_series], axis=0),
pandas.concat([df, df2, pandas_series], axis=0),
)
df_equals(
pd.concat([modin_df, modin_df2, pandas_series], axis=1),
pandas.concat([df, df2, pandas_series], axis=1),
)
def test_ray_concat_on_index():
df, df2 = generate_dfs()
modin_df, modin_df2 = from_pandas(df), from_pandas(df2)
df_equals(
pd.concat([modin_df, modin_df2], axis="index"),
pandas.concat([df, df2], axis="index"),
)
df_equals(
pd.concat([modin_df, modin_df2], axis="rows"),
pandas.concat([df, df2], axis="rows"),
)
df_equals(
pd.concat([modin_df, modin_df2], axis=0), pandas.concat([df, df2], axis=0)
)
def test_ray_concat_on_column():
df, df2 = generate_dfs()
modin_df, modin_df2 = from_pandas(df), from_pandas(df2)
df_equals(
pd.concat([modin_df, modin_df2], axis=1), pandas.concat([df, df2], axis=1)
)
df_equals(
pd.concat([modin_df, modin_df2], axis="columns"),
| pandas.concat([df, df2], axis="columns") | pandas.concat |
##
# Many of my features are taken from or inspired by public kernels. The
# following is a probably incomplete list of these kernels:
# - https://www.kaggle.com/ggeo79/j-coupling-lightbgm-gpu-dihedral-angle for
# the idea to use dihedral angles on 3J couplings.
# - https://www.kaggle.com/titericz/giba-r-data-table-simple-features-1-17-lb
# mostly for distance features.
# - https://www.kaggle.com/kmat2019/effective-feature provides the idea to
# compute cosine angles between scalar coupling atoms and their nearest
# neighbors.
# - https://www.kaggle.com/seriousran/just-speed-up-calculate-distance-from-benchmark
# for an efficient distance calculation between scalar coupling atoms.
#
# Running this script will give some warnings related to the
# 'explicit valance..' rdkit error. The problem is dicussed here
# https://www.kaggle.com/c/champs-scalar-coupling/discussion/94274#latest-572435
# I hadn't gotten around to implementing the proper solutions discussed there.
import gc
import numpy as np
import pandas as pd
from itertools import combinations
from glob import glob
import deepchem as dc
from rdkit.Chem import rdmolops, ChemicalFeatures
from xyz2mol import read_xyz_file, xyz2mol
from utils import print_progress
import constants as C
mol_feat_columns = ['ave_bond_length', 'std_bond_length', 'ave_atom_weight']
# glob the structure files so that full paths are returned (they are opened later)
xyz_filepath_list = glob(C.RAW_DATA_PATH + 'structures/*.xyz')
xyz_filepath_list.sort()
## Functions to create the RDKit mol objects
def mol_from_xyz(filepath, add_hs=True, compute_dist_centre=False):
"""Wrapper function for calling xyz2mol function."""
charged_fragments = True # alternatively radicals are made
# quick is faster for large systems but requires networkx
# if you don't want to install networkx set quick=False and
# uncomment 'import networkx as nx' at the top of the file
quick = True
atomicNumList, charge, xyz_coordinates = read_xyz_file(filepath)
mol, dMat = xyz2mol(atomicNumList, charge, xyz_coordinates,
charged_fragments, quick, check_chiral_stereo=False)
return mol, np.array(xyz_coordinates), dMat
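# Example usage (illustrative only; the file name below is hypothetical):
#   mol, xyz, dmat = mol_from_xyz(C.RAW_DATA_PATH + 'structures/dsgdb9nsd_000001.xyz')
# 'mol' is an RDKit Mol object, 'xyz' an (n_atoms, 3) coordinate array and
# 'dmat' the matrix of pairwise euclidean distances between atoms.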
def get_molecules():
"""
Constructs rdkit mol objects derrived from the .xyz files. Also returns:
- mol ids (unique numerical ids)
- set of molecule level features
- arrays of xyz coordinates
- euclidean distance matrices
- graph distance matrices.
All objects are returned in dictionaries with 'mol_name' as keys.
"""
mols, mol_ids, mol_feats = {}, {}, {}
xyzs, dist_matrices, graph_dist_matrices = {}, {}, {}
print('Create molecules and distance matrices.')
for i in range(C.N_MOLS):
print_progress(i, C.N_MOLS)
filepath = xyz_filepath_list[i]
mol_name = filepath.split('/')[-1][:-4]
mol, xyz, dist_matrix = mol_from_xyz(filepath)
mols[mol_name] = mol
xyzs[mol_name] = xyz
dist_matrices[mol_name] = dist_matrix
mol_ids[mol_name] = i
# make padded graph distance matrix dataframes
n_atoms = len(xyz)
graph_dist_matrix = pd.DataFrame(np.pad(
rdmolops.GetDistanceMatrix(mol),
[(0, 0), (0, C.MAX_N_ATOMS - n_atoms)], 'constant'
))
graph_dist_matrix['molecule_id'] = n_atoms * [i]
graph_dist_matrices[mol_name] = graph_dist_matrix
# compute molecule level features
adj_matrix = rdmolops.GetAdjacencyMatrix(mol)
atomic_num_list, _, _ = read_xyz_file(filepath)
dists = dist_matrix.ravel()[np.tril(adj_matrix).ravel()==1]
mol_feats[mol_name] = pd.Series(
[np.mean(dists), np.std(dists), np.mean(atomic_num_list)],
index=mol_feat_columns
)
return mols, mol_ids, mol_feats, xyzs, dist_matrices, graph_dist_matrices
## Functions to create features at the scalar coupling level.
def map_atom_info(df, atom_idx, struct_df):
"""Adds xyz-coordinates of atom_{atom_idx} to 'df'."""
df = pd.merge(df, struct_df, how = 'left',
left_on = ['molecule_name', f'atom_index_{atom_idx}'],
right_on = ['molecule_name', 'atom_index'])
df = df.drop('atom_index', axis=1)
df = df.rename(columns={'atom': f'atom_{atom_idx}',
'x': f'x_{atom_idx}',
'y': f'y_{atom_idx}',
'z': f'z_{atom_idx}'})
return df
def add_dist(df, struct_df):
"""Adds euclidean distance between scalar coupling atoms to 'df'."""
df = map_atom_info(df, 0, struct_df)
df = map_atom_info(df, 1, struct_df)
p_0 = df[['x_0', 'y_0', 'z_0']].values
p_1 = df[['x_1', 'y_1', 'z_1']].values
df['dist'] = np.linalg.norm(p_0 - p_1, axis=1)
df.drop(columns=['x_0', 'y_0', 'z_0', 'x_1', 'y_1', 'z_1'], inplace=True)
return df
def transform_per_atom_group(df, a_idx, col='dist', trans='mean'):
"""Apply transformation 'trans' on feature in 'col' to scalar coupling
constants grouped at the atom level."""
return df.groupby(
['molecule_name', f'atom_index_{a_idx}'])[col].transform(trans)
def inv_dist_per_atom(df, a_idx, d_col='dist', power=3):
"""Compute sum of inverse distances of scalar coupling constants grouped at
the atom level."""
trans = lambda x: 1 / sum(x ** -power)
return transform_per_atom_group(df, a_idx, d_col, trans=trans)
def inv_dist_harmonic_mean(df, postfix=''):
"""Compute the harmonic mean of inverse distances of atom_0 and atom_1."""
c0, c1 = 'inv_dist0' + postfix, 'inv_dist1' + postfix
return (df[c0] * df[c1]) / (df[c0] + df[c1])
def add_atom_counts(df, struct_df):
"""Add atom counts (total and per type) to 'df'."""
pd.options.mode.chained_assignment = None
atoms_per_mol_df = struct_df.groupby(['molecule_name', 'atom']).count()
atoms_per_mol_map = atoms_per_mol_df['atom_index'].unstack().fillna(0)
atoms_per_mol_map = atoms_per_mol_map.astype(int).to_dict()
df['num_atoms'] = 0
for a in atoms_per_mol_map:
df[f'num_{a}_atoms'] = df['molecule_name'].map(atoms_per_mol_map[a])
df['num_atoms'] += df[f'num_{a}_atoms']
return df
# source: https://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python
def dihedral(p):
"""Praxeolitic formula: 1 sqrt, 1 cross product"""
p0 = p[0]
p1 = p[1]
p2 = p[2]
p3 = p[3]
b0 = -1.0*(p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= np.linalg.norm(b1)
# vector rejections
v = b0 - np.dot(b0, b1)*b1
w = b2 - np.dot(b2, b1)*b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)
def cosine_angle(p):
p0, p1, p2 = p[0], p[1], p[2]
v1, v2 = p0 - p1, p2 - p1
return np.dot(v1, v2) / np.sqrt(np.dot(v1, v1) * np.dot(v2, v2))
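# A minimal sanity check of the two helpers above (illustrative only; the four
# points are made up and not taken from any molecule in the dataset). For this
# right-angle configuration the dihedral is -pi/2 and the cosine angle is 0.
_example_pts = np.array([[0.0, 0.0, 1.0],
                         [0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [1.0, 1.0, 0.0]])
assert np.isclose(dihedral(_example_pts), -np.pi / 2)
assert np.isclose(cosine_angle(_example_pts[[0, 1, 2]]), 0.0)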
def add_sc_angle_features(df, xyzs, dist_matrices):
"""
Adds the following angle features to 'df':
- diangle: for 3J couplings
- cos_angle: for 2J couplings, angle between sc atom 0, atom in between sc
atoms and sc atom 1
- cos_angle0: for all coupling types, cos angle between sc atoms and atom
closest to atom 0 (except for 1J coupling)
- cos_angle1: for all coupling types, cos angle between sc atoms and atom
closest to atom 1
"""
df['diangle'] = 0.0
df['cos_angle'] = 0.0
df['cos_angle0'] = 0.0
df['cos_angle1'] = 0.0
diangles, cos_angles, cos_angles0, cos_angles1 = {}, {}, {}, {}
print('Add scalar coupling angle based features.')
n = len(df)
for i, (idx, row) in enumerate(df.iterrows()):
print_progress(i, n, 500000)
mol_name = row['molecule_name']
mol, xyz = mols[mol_name], xyzs[mol_name]
dist_matrix = dist_matrices[mol_name]
adj_matrix = rdmolops.GetAdjacencyMatrix(mol)
idx0, idx1 = row['atom_index_0'], row['atom_index_1']
atom_ids = rdmolops.GetShortestPath(mol, idx0, idx1)
if len(atom_ids)==4:
diangles[idx] = dihedral(xyz[atom_ids,:])
elif len(atom_ids)==3:
cos_angles[idx] = cosine_angle(xyz[atom_ids,:])
if row['type'] not in [0, 2]:
neighbors0 = np.where(adj_matrix[idx0]==1)[0]
if len(neighbors0) > 0:
idx0_closest = neighbors0[
dist_matrix[idx0][neighbors0].argmin()]
cos_angles0[idx] = cosine_angle(
xyz[[idx0_closest, idx0, idx1],:])
neighbors1 = np.setdiff1d(np.where(adj_matrix[idx1]==1)[0], [idx0])
if len(neighbors1) > 0:
idx1_closest = neighbors1[
dist_matrix[idx1][neighbors1].argmin()]
cos_angles1[idx] = cosine_angle(
xyz[[idx0, idx1, idx1_closest],:])
df['diangle'] = pd.Series(diangles).abs()
df['cos_angle'] = pd.Series(cos_angles)
df['cos_angle0'] = | pd.Series(cos_angles0) | pandas.Series |
"""tests.core.archive.test_archive.py
Copyright Keithley Instruments, LLC.
Licensed under MIT (https://github.com/tektronix/syphon/blob/master/LICENSE)
"""
import os
from typing import List, Optional, Tuple
import pytest
from _pytest.capture import CaptureFixture
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
from pandas import DataFrame, concat, read_csv
from pandas.testing import assert_frame_equal
from py._path.local import LocalPath
from sortedcontainers import SortedDict, SortedList
import syphon
import syphon.hash
import syphon.schema
from syphon.core.archive.filemap import MappingBehavior
from syphon.core.check import DEFAULT_FILE as DEFAULT_HASH_FILE
from ... import get_data_path, rand_string
from ...assert_utils import assert_captured_outerr
from ...types import PathType
@pytest.fixture(
params=[
("iris.csv", SortedDict({"0": "Name"})),
("iris_plus.csv", SortedDict({"0": "Species", "1": "PetalColor"})),
(
"auto-mpg.csv",
SortedDict({"0": "model year", "1": "cylinders", "2": "origin"}),
),
]
)
def archive_params(request: FixtureRequest) -> Tuple[str, SortedDict]:
return request.param
@pytest.fixture(
params=[
(
"iris-part-1-of-6",
"iris-part-1-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-2-of-6",
"iris-part-2-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-3-of-6",
"iris-part-3-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-4-of-6",
"iris-part-4-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-5-of-6",
"iris-part-5-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-6-of-6",
"iris-part-6-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
]
)
def archive_meta_params(request: FixtureRequest) -> Tuple[str, str, SortedDict]:
return request.param
@pytest.fixture(params=[PathType.ABSOLUTE, PathType.RELATIVE])
def styled_cache_file(request: FixtureRequest, cache_file: LocalPath) -> str:
"""Breaks if any test in this file changes the current working directory!"""
if request.param == PathType.ABSOLUTE:
return str(cache_file)
elif request.param == PathType.RELATIVE:
return os.path.relpath(cache_file, os.getcwd())
else:
raise TypeError(f"Unsupported PathType '{request.param}'")
@pytest.fixture(params=[PathType.ABSOLUTE, PathType.RELATIVE])
def styled_hash_file(
request: FixtureRequest, hash_file: Optional[LocalPath]
) -> Optional[str]:
"""Breaks if any test in this file changes the current working directory!"""
if hash_file is None:
return None
if request.param == PathType.ABSOLUTE:
return str(hash_file)
elif request.param == PathType.RELATIVE:
return os.path.relpath(hash_file, os.getcwd())
else:
raise TypeError(f"Unsupported PathType '{request.param}'")
def _get_expected_paths(
path: str,
schema: SortedDict,
subset: DataFrame,
filename: str,
data: SortedList = SortedList(),
) -> SortedList:
path_list = data.copy()
this_schema = schema.copy()
try:
_, header = this_schema.popitem(index=0)
except KeyError:
path_list.add(os.path.join(path, filename))
return path_list
if header not in subset.columns:
return path_list
for value in subset.get(header).drop_duplicates().values:
new_subset = subset.loc[subset.get(header) == value]
value = value.lower().replace(" ", "_")
if value[-1] == ".":
value = value[:-1]
path_list = _get_expected_paths(
os.path.join(path, value), this_schema, new_subset, filename, data=path_list
)
return path_list
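# Worked example (hypothetical values): with path="archive", filename="data.csv",
# schema=SortedDict({"0": "Species"}) and a subset whose "Species" column contains
# "Iris-setosa" and "Iris-virginica", the helper returns
# SortedList(["archive/iris-setosa/data.csv", "archive/iris-virginica/data.csv"]).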
class TestArchive(object):
class ArchiveCacheAndHashPassthruChecker(object):
"""Asserts that the cache and hash file paths are not edited before being sent
to the `build` subcommand.
"""
def __init__(
self, monkeypatch: MonkeyPatch, cache_file: str, hash_file: Optional[str],
):
from syphon.core.build import build
self._syphon_build = build
self._monkeypatch: MonkeyPatch = monkeypatch
self.cache_file: str = cache_file
self.hash_file: Optional[str] = hash_file
def __call__(self, *args, **kwargs) -> bool:
with self._monkeypatch.context() as m:
m.setattr(syphon.core.build, "build", value=self._build_shim)
return syphon.archive(*args, **kwargs)
def _build_shim(self, *args, **kwargs) -> bool:
"""Everything is converted to str or None so test cases don't have to worry
about using LocalPath.
"""
# XXX: If the syphon.build argument order changes,
# then we need to access a different index!
assert (
str(args[0]) == self.cache_file
), f"Cache filepath edited from '{self.cache_file}' to '{args[0]}'"
# XXX: If the name of the argument changes,
# then we need to access a different key!
assert "hash_filepath" in kwargs
actual_hash_file = (
None
if kwargs["hash_filepath"] is None
else str(kwargs["hash_filepath"])
)
assert (
actual_hash_file == self.hash_file
), f"Hash filepath edited from '{self.hash_file}' to '{actual_hash_file}'"
return self._syphon_build(*args, **kwargs)
@pytest.fixture(scope="function")
def archive_fixture(
self,
monkeypatch: MonkeyPatch,
styled_cache_file: str,
styled_hash_file: Optional[str],
) -> "TestArchive.ArchiveCacheAndHashPassthruChecker":
return TestArchive.ArchiveCacheAndHashPassthruChecker(
monkeypatch, styled_cache_file, styled_hash_file
)
def test_empty_datafile(
self, capsys: CaptureFixture, archive_dir: LocalPath, verbose: bool
):
datafile = os.path.join(get_data_path(), "empty.csv")
assert not syphon.archive(archive_dir, [datafile], verbose=verbose)
assert_captured_outerr(capsys.readouterr(), verbose, False)
assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock"))
def test_increment_one_to_many_with_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename, metadata filename) tuples
targets: List[Tuple[str, str, List[str]]] = [
(
"iris-part-1-of-6-combined.csv",
"iris-part-1-of-6.csv",
[
"iris-part-1-of-6-meta-part-1-of-2.meta",
"iris-part-1-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2.csv",
"iris-part-2-of-6.csv",
[
"iris-part-2-of-6-meta-part-1-of-2.meta",
"iris-part-2-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2-3.csv",
"iris-part-3-of-6.csv",
[
"iris-part-3-of-6-meta-part-1-of-2.meta",
"iris-part-3-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2-3-4.csv",
"iris-part-4-of-6.csv",
[
"iris-part-4-of-6-meta-part-1-of-2.meta",
"iris-part-4-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2-3-4-5.csv",
"iris-part-5-of-6.csv",
[
"iris-part-5-of-6-meta-part-1-of-2.meta",
"iris-part-5-of-6-meta-part-2-of-2.meta",
],
),
(
"iris_plus.csv",
"iris-part-6-of-6.csv",
[
"iris-part-6-of-6-meta-part-1-of-2.meta",
"iris-part-6-of-6-meta-part-2-of-2.meta",
],
),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
expected_schemafile = (
archive_dir.join(syphon.schema.DEFAULT_FILE)
if schema_file is None
else schema_file
)
assert not os.path.exists(expected_schemafile)
syphon.init(
SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile
)
assert os.path.exists(expected_schemafile)
for expected_frame_filename, data_filename, metadata_filenames in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
meta_files=[
os.path.join(get_data_path(), m) for m in metadata_filenames
],
filemap_behavior=MappingBehavior.ONE_TO_MANY,
schema_filepath=schema_file,
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame = actual_frame.reindex(columns=expected_frame.columns)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_with_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename, metadata filename) tuples
targets: List[Tuple[str, str, str]] = [
(
"iris-part-1-of-6-combined.csv",
"iris-part-1-of-6.csv",
"iris-part-1-of-6.meta",
),
("iris-part-1-2.csv", "iris-part-2-of-6.csv", "iris-part-2-of-6.meta"),
("iris-part-1-2-3.csv", "iris-part-3-of-6.csv", "iris-part-3-of-6.meta"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv", "iris-part-4-of-6.meta"),
(
"iris-part-1-2-3-4-5.csv",
"iris-part-5-of-6.csv",
"iris-part-5-of-6.meta",
),
("iris_plus.csv", "iris-part-6-of-6.csv", "iris-part-6-of-6.meta"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
expected_schemafile = (
archive_dir.join(syphon.schema.DEFAULT_FILE)
if schema_file is None
else schema_file
)
assert not os.path.exists(expected_schemafile)
syphon.init(
SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile
)
assert os.path.exists(expected_schemafile)
for expected_frame_filename, data_filename, metadata_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
meta_files=[os.path.join(get_data_path(), metadata_filename)],
schema_filepath=schema_file,
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_with_metadata_without_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
verbose: bool,
):
# List of (expected frame filename, data filename, metadata filename) tuples
targets: List[Tuple[str, str, str]] = [
(
"iris-part-1-of-6-combined.csv",
"iris-part-1-of-6.csv",
"iris-part-1-of-6.meta",
),
("iris-part-1-2.csv", "iris-part-2-of-6.csv", "iris-part-2-of-6.meta"),
("iris-part-1-2-3.csv", "iris-part-3-of-6.csv", "iris-part-3-of-6.meta"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv", "iris-part-4-of-6.meta"),
(
"iris-part-1-2-3-4-5.csv",
"iris-part-5-of-6.csv",
"iris-part-5-of-6.meta",
),
("iris_plus.csv", "iris-part-6-of-6.csv", "iris-part-6-of-6.meta"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
for expected_frame_filename, data_filename, metadata_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
meta_files=[os.path.join(get_data_path(), metadata_filename)],
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_without_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename) tuples
targets: List[Tuple[str, str]] = [
("iris-part-1-of-6-combined.csv", "iris-part-1-of-6-combined.csv"),
("iris-part-1-2.csv", "iris-part-2-of-6-combined.csv"),
("iris-part-1-2-3.csv", "iris-part-3-of-6-combined.csv"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6-combined.csv"),
("iris-part-1-2-3-4-5.csv", "iris-part-5-of-6-combined.csv"),
("iris_plus.csv", "iris-part-6-of-6-combined.csv"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
expected_schemafile = (
archive_dir.join(syphon.schema.DEFAULT_FILE)
if schema_file is None
else schema_file
)
assert not os.path.exists(expected_schemafile)
syphon.init(
SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile
)
assert os.path.exists(expected_schemafile)
for expected_frame_filename, data_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
schema_filepath=schema_file,
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_without_metadata_without_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename) tuples
targets: List[Tuple[str, str]] = [
("iris-part-1-of-6-combined.csv", "iris-part-1-of-6.csv"),
("iris-part-1-2.csv", "iris-part-2-of-6.csv"),
("iris-part-1-2-3.csv", "iris-part-3-of-6.csv"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv"),
("iris-part-1-2-3-4-5.csv", "iris-part-5-of-6.csv"),
("iris_plus.csv", "iris-part-6-of-6.csv"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
for expected_frame_filename, data_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
del expected_frame["Species"]
del expected_frame["PetalColor"]
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_no_datafiles(
self, capsys: CaptureFixture, archive_dir: LocalPath, verbose: bool
):
assert not syphon.archive(archive_dir, [], verbose=verbose)
assert_captured_outerr(capsys.readouterr(), verbose, False)
def test_without_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_params: Tuple[str, SortedDict],
archive_dir: LocalPath,
overwrite: bool,
verbose: bool,
):
filename: str
schema: SortedDict
filename, schema = archive_params
datafile = os.path.join(get_data_path(), filename)
schemafile = os.path.join(archive_dir, syphon.schema.DEFAULT_FILE)
syphon.init(schema, schemafile)
expected_df = DataFrame(read_csv(datafile, dtype=str))
expected_df.sort_values(list(expected_df.columns), inplace=True)
expected_df.reset_index(drop=True, inplace=True)
expected_paths: SortedList = _get_expected_paths(
archive_dir, schema, expected_df, filename
)
if overwrite:
for e in expected_paths:
os.makedirs(os.path.dirname(e), exist_ok=True)
with open(e, mode="w") as fd:
fd.write(rand_string())
assert syphon.archive(
archive_dir,
[datafile],
schema_filepath=schemafile,
overwrite=overwrite,
verbose=verbose,
)
assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock"))
actual_frame = DataFrame()
actual_paths = SortedList()
for root, _, files in os.walk(archive_dir):
for f in files:
if ".csv" in f:
filepath: str = os.path.join(root, f)
actual_paths.add(filepath)
actual_frame = concat(
[actual_frame, DataFrame(read_csv(filepath, dtype=str))]
)
actual_frame.sort_values(list(actual_frame.columns), inplace=True)
actual_frame.reset_index(drop=True, inplace=True)
assert expected_paths == actual_paths
assert_frame_equal(expected_df, actual_frame)
assert_captured_outerr(capsys.readouterr(), verbose, False)
def test_without_metadata_without_schema(
self,
capsys: CaptureFixture,
archive_params: Tuple[str, SortedDict],
archive_dir: LocalPath,
overwrite: bool,
verbose: bool,
):
filename: str
filename, _ = archive_params
datafile = os.path.join(get_data_path(), filename)
expected_df = DataFrame(read_csv(datafile, dtype=str))
expected_df.sort_values(list(expected_df.columns), inplace=True)
expected_df.reset_index(drop=True, inplace=True)
expected_paths = SortedList([os.path.join(archive_dir, filename)])
if overwrite:
for e in expected_paths:
path: LocalPath = archive_dir.new()
path.mkdir(os.path.basename(os.path.dirname(e)))
with open(e, mode="w") as fd:
fd.write(rand_string())
assert syphon.archive(
archive_dir, [datafile], overwrite=overwrite, verbose=verbose
)
assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock"))
actual_frame = DataFrame()
actual_paths = SortedList()
for root, _, files in os.walk(archive_dir):
for f in files:
if ".csv" in f:
filepath: str = os.path.join(root, f)
actual_paths.add(filepath)
actual_frame = concat(
[actual_frame, DataFrame(read_csv(filepath, dtype=str))]
)
actual_frame.sort_values(list(actual_frame.columns), inplace=True)
actual_frame.reset_index(drop=True, inplace=True)
assert expected_paths == actual_paths
assert_frame_equal(expected_df, actual_frame)
assert_captured_outerr(capsys.readouterr(), verbose, False)
def test_with_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_meta_params: Tuple[str, str, SortedDict],
archive_dir: LocalPath,
overwrite: bool,
verbose: bool,
):
filename: str
expectedfilename: str
schema: SortedDict
filename, expectedfilename, schema = archive_meta_params
datafile = os.path.join(get_data_path(), filename + ".csv")
metafile = os.path.join(get_data_path(), filename + ".meta")
schemafile = os.path.join(archive_dir, syphon.schema.DEFAULT_FILE)
syphon.init(schema, schemafile)
expected_df = DataFrame(
# Read our dedicated *-combined.csv file instead of the import target.
read_csv(os.path.join(get_data_path(), expectedfilename), dtype=str)
)
expected_df.sort_values(list(expected_df.columns), inplace=True)
expected_df.reset_index(drop=True, inplace=True)
expected_paths: SortedList = _get_expected_paths(
archive_dir, schema, expected_df, filename + ".csv"
)
if overwrite:
for e in expected_paths:
os.makedirs(os.path.dirname(e), exist_ok=True)
with open(e, mode="w") as fd:
fd.write(rand_string())
assert syphon.archive(
archive_dir,
[datafile],
meta_files=[metafile],
schema_filepath=schemafile,
overwrite=overwrite,
verbose=verbose,
)
assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock"))
actual_df = DataFrame()
actual_paths = SortedList()
for root, _, files in os.walk(archive_dir):
for f in files:
if ".csv" in f:
filepath: str = os.path.join(root, f)
actual_paths.add(filepath)
actual_df = concat(
[actual_df, DataFrame(read_csv(filepath, dtype=str))]
)
actual_df.sort_values(list(actual_df.columns), inplace=True)
actual_df.reset_index(drop=True, inplace=True)
assert expected_paths == actual_paths
assert_frame_equal(expected_df, actual_df)
def test_with_metadata_without_schema(
self,
capsys: CaptureFixture,
archive_meta_params: Tuple[str, str, SortedDict],
archive_dir: LocalPath,
overwrite: bool,
verbose: bool,
):
filename: str
expectedfilename: str
filename, expectedfilename, _ = archive_meta_params
datafile = os.path.join(get_data_path(), filename + ".csv")
metafile = os.path.join(get_data_path(), filename + ".meta")
expected_df = DataFrame(
# Read our dedicated *-combined.csv file instead of the import target.
read_csv(os.path.join(get_data_path(), expectedfilename), dtype=str)
)
expected_df.sort_values(list(expected_df.columns), inplace=True)
expected_df.reset_index(drop=True, inplace=True)
expected_paths: SortedList = _get_expected_paths(
archive_dir, SortedDict(), expected_df, filename + ".csv"
)
if overwrite:
for e in expected_paths:
os.makedirs(os.path.dirname(e), exist_ok=True)
with open(e, mode="w") as fd:
fd.write(rand_string())
assert syphon.archive(
archive_dir,
[datafile],
meta_files=[metafile],
overwrite=overwrite,
verbose=verbose,
)
assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock"))
actual_df = DataFrame()
actual_paths = SortedList()
for root, _, files in os.walk(archive_dir):
for f in files:
if ".csv" in f:
filepath: str = os.path.join(root, f)
actual_paths.add(filepath)
actual_df = concat(
[actual_df, DataFrame( | read_csv(filepath, dtype=str) | pandas.read_csv |
''' IVMS checker program
'''
import datetime
import pandas as pd
import numpy as np
IVMS_file = 'D:\\OneDrive\\Work\\PDO\\IVMS\\Daily Trip Report - IVMS.xls'
vehicle_file = 'D:\\OneDrive\\Work\\PDO\\IVMS\\Lekhwair Vehicles Demob Plan V3.xlsx'
vehicle_ivms_file = 'D:\\OneDrive\\Work\\PDO\\IVMS\\Lekhwair Vehicles - IVMS.xlsx'
class IVMS:
def __init__(self):
self.ivms_df = pd.read_excel(IVMS_file, header=4)
# fill in dates where there is no date and replace the columns
date_col = self.ivms_df['Date'].to_list()
date_hold = pd.NaT
for i, _date in enumerate(date_col):
if | pd.isna([_date]) | pandas.isna |
from abc import abstractmethod
from analizer.abstract.expression import Expression
from analizer.abstract import expression
from enum import Enum
from storage.storageManager import jsonMode
from analizer.typechecker.Metadata import Struct
from analizer.typechecker import Checker
import pandas as pd
from analizer.symbol.symbol import Symbol
from analizer.symbol.environment import Environment
from analizer.reports import Nodo
from analizer.reports import AST
ast = AST.AST()
root = None
class SELECT_MODE(Enum):
ALL = 1
PARAMS = 2
# load data
Struct.load()
# variable that stores the database currently in use
dbtemp = ""
# lists that store the semantic errors
sintaxPostgreSQL = list()
semanticErrors = list()
class Instruction:
"""
    This class represents an instruction
"""
def __init__(self, row, column) -> None:
self.row = row
self.column = column
@abstractmethod
def execute(self, environment):
"""
        Method used to execute the expressions
"""
class SelectOnlyParams(Instruction):
def __init__(self, params, row, column):
Instruction.__init__(self, row, column)
self.params = params
def execute(self, environment):
value = [p.execute(environment).value for p in self.params]
labels = [p.temp for p in self.params]
return labels, value
class SelectParams(Instruction):
def __init__(self, params, row, column):
Instruction.__init__(self, row, column)
self.params = params
def execute(self, environment):
pass
class Select(Instruction):
def __init__(self, params, fromcl, wherecl, row, column):
Instruction.__init__(self, row, column)
self.params = params
self.wherecl = wherecl
self.fromcl = fromcl
def execute(self, environment):
newEnv = Environment(environment, dbtemp)
self.fromcl.execute(newEnv)
if self.params:
params = []
for p in self.params:
if isinstance(p, expression.TableAll):
result = p.execute(newEnv)
for r in result:
params.append(r)
else:
params.append(p)
labels = [p.temp for p in params]
value = [p.execute(newEnv).value for p in params]
else:
value = [newEnv.dataFrame[p] for p in newEnv.dataFrame]
labels = [p for p in newEnv.dataFrame]
for i in range(len(labels)):
newEnv.dataFrame[labels[i]] = value[i]
        if self.wherecl is None:
return newEnv.dataFrame.filter(labels)
wh = self.wherecl.execute(newEnv)
w2 = wh.filter(labels)
        # If the WHERE clause returns an empty dataframe
if w2.empty:
return None
return [w2, environment.types]
class FromClause(Instruction):
"""
    Class in charge of the FROM clause, used to retrieve the data
"""
def __init__(self, tables, aliases, row, column):
Instruction.__init__(self, row, column)
self.tables = tables
self.aliases = aliases
def crossJoin(self, tables):
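        # Emulate a Cartesian product (CROSS JOIN): merge every DataFrame on a
        # shared constant helper column, then drop the helper column afterwards.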
if len(tables) <= 1:
return tables[0]
for t in tables:
t["____tempCol"] = 1
new_df = tables[0]
i = 1
while i < len(tables):
new_df = pd.merge(new_df, tables[i], on=["____tempCol"])
i += 1
new_df = new_df.drop("____tempCol", axis=1)
return new_df
def execute(self, environment):
tempDf = None
for i in range(len(self.tables)):
exec = self.tables[i].execute(environment)
data = exec[0]
types = exec[1]
if isinstance(self.tables[i], Select):
newNames = {}
subqAlias = self.aliases[i]
for (columnName, columnData) in data.iteritems():
colSplit = columnName.split(".")
if len(colSplit) >= 2:
newNames[columnName] = subqAlias + "." + colSplit[1]
types[subqAlias + "." + colSplit[1]] = columnName
else:
newNames[columnName] = subqAlias + "." + colSplit[0]
types[subqAlias + "." + colSplit[0]] = columnName
data.rename(columns=newNames, inplace=True)
environment.addVar(subqAlias, subqAlias, "TABLE", self.row, self.column)
else:
sym = Symbol(
self.tables[i].name,
None,
self.tables[i].row,
self.tables[i].column,
)
environment.addSymbol(self.tables[i].name, sym)
if self.aliases[i]:
environment.addSymbol(self.aliases[i], sym)
if i == 0:
tempDf = data
else:
tempDf = self.crossJoin([tempDf, data])
environment.dataFrame = tempDf
environment.types.update(types)
return
class TableID(Expression):
"""
    This class represents an abstract object for handling tables
"""
type_ = None
def __init__(self, name, row, column):
Expression.__init__(self, row, column)
self.name = name
def execute(self, environment):
result = jsonMode.extractTable(dbtemp, self.name)
if result == None:
sintaxPostgreSQL.insert(
len(sintaxPostgreSQL),
"Error: 42P01: la relacion "
+ dbtemp
+ "."
+ str(self.name)
+ " no existe",
)
return "FATAL ERROR TABLE ID"
        # Stores a list with the name and type of each column
lst = Struct.extractColumns(dbtemp, self.name)
columns = [l.name for l in lst]
newColumns = [self.name + "." + col for col in columns]
df = | pd.DataFrame(result, columns=newColumns) | pandas.DataFrame |
import argparse
from bs4 import BeautifulSoup
import multiprocessing as mp
from multiprocessing.pool import ThreadPool
import os
import pandas as pd
import pathlib
import requests
import subprocess
from tqdm.auto import tqdm
from utils import load_config
''' load config and secrets '''
# config = load_config(path='../', config_file='config.yaml')['kowalski']
config = load_config(config_file='config.yaml')['kowalski']
def collect_urls(rc):
bu = os.path.join(base_url, f'rc{rc:02d}')
response = requests.get(bu, auth=(config['ztf_depot']['username'], config['ztf_depot']['password']))
html = response.text
# link_list = []
soup = BeautifulSoup(html, 'html.parser')
links = soup.findAll('a')
for link in links:
txt = link.getText()
if 'fr' in txt:
bu_fr = os.path.join(bu, txt)
response_fr = requests.get(
bu_fr,
auth=(config['ztf_depot']['username'], config['ztf_depot']['password'])
)
html_fr = response_fr.text
soup_fr = BeautifulSoup(html_fr, 'html.parser')
links_fr = soup_fr.findAll('a')
for link_fr in links_fr:
txt_fr = link_fr.getText()
if txt_fr.endswith('.pytable'):
# print('\t', txt_fr)
urls.append({'rc': rc, 'name': txt_fr, 'url': os.path.join(bu_fr, txt_fr)})
# fixme:
# break
def fetch_url(urlrc, source='ipac'):
url, _rc = urlrc
p = os.path.join(str(path), str(_rc), os.path.basename(url))
if not os.path.exists(p):
if source == 'ipac':
subprocess.run(['wget',
f"--http-user={config['ztf_depot']['username']}",
f"--http-passwd={config['ztf_depot']['password']}",
'-q', '--timeout=600', '--waitretry=10',
'--tries=5', '-O', p, url])
elif source == 'supernova':
_url = url.replace('https://', '/media/Data2/Matchfiles/')
subprocess.run(['scp',
f'<EMAIL>:{_url}',
path])
# time.sleep(0.5)
def gunzip(f):
subprocess.run(['gunzip', f])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tag', type=str, default='20200401', help='matchfile release time tag')
args = parser.parse_args()
t_tag = args.tag
path_base = pathlib.Path('./')
# path_base = pathlib.Path('/_tmp/')
path = path_base / f'ztf_matchfiles_{t_tag}/'
if not path.exists():
path.mkdir(exist_ok=True, parents=True)
for rc in range(0, 64):
path_rc = path / str(rc)
if not path_rc.exists():
path_rc.mkdir(exist_ok=True, parents=True)
path_urls = path_base / f'ztf_matchfiles_{t_tag}.csv'
# n_rc = 1
n_rc = 64
if not path_urls.exists():
base_url = 'https://ztfweb.ipac.caltech.edu/ztf/ops/srcmatch/'
# store urls
urls = []
print('Collecting urls of matchfiles to download:')
# collect urls of matchfiles to download
with ThreadPool(processes=20) as pool:
list(tqdm(pool.imap(collect_urls, range(0, n_rc)), total=n_rc))
df_mf = | pd.DataFrame.from_records(urls) | pandas.DataFrame.from_records |
#!/usr/bin/env python
# Copyright (C) 2019 <NAME>
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from dtrace.DTracePlot import DTracePlot
class Preliminary(DTracePlot):
HIST_KDE_KWS = dict(cumulative=False, cut=0)
@classmethod
def _pairplot_fix_labels(cls, g, pca, by):
for i, ax in enumerate(g.axes):
vexp = pca[by]["vex"]["PC{}".format(i + 1)]
ax[0].set_ylabel("PC{} ({:.1f}%)".format(i + 1, vexp * 100))
for i, ax in enumerate(g.axes[2]):
vexp = pca[by]["vex"]["PC{}".format(i + 1)]
ax.set_xlabel("PC{} ({:.1f}%)".format(i + 1, vexp * 100))
@classmethod
def pairplot_pca_by_rows(cls, pca, hue="VERSION"):
df = pca["row"]["pcs"].reset_index()
pal = None if hue is None else dict(GDSC1=cls.PAL_DTRACE[2], GDSC2=cls.PAL_DTRACE[0])
color = cls.PAL_DTRACE[2] if hue is None else None
g = sns.PairGrid(
df,
vars=["PC1", "PC2", "PC3"],
despine=False,
size=1.5,
hue=hue,
palette=pal,
)
g = g.map_diag(plt.hist, color=color, linewidth=0, alpha=0.5)
g = g.map_offdiag(
plt.scatter, s=8, edgecolor="white", lw=0.1, alpha=0.8, color=color
)
if hue is not None:
g = g.add_legend()
cls._pairplot_fix_labels(g, pca, by="row")
@classmethod
def pairplot_pca_by_columns(cls, pca, hue=None, hue_vars=None):
df = pca["column"]["pcs"]
if hue_vars is not None:
df = | pd.concat([df, hue_vars], axis=1, sort=False) | pandas.concat |
from lib.timecards import Timecards
from datetime import date, timedelta
import pandas as pd
import pdb
class MonthTimecards:
def __init__(self, year, month):
self.sundays = [sunday for sunday in self.get_sundays_in_month(year, month)]
def get_timecards_in_month(self):
""" get the timecards in the month from the sundays array"""
week_cards = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from multiprocessing import Pool
import tqdm
import sys
import gzip as gz
from tango.prepare import init_sqlite_taxdb
def translate_taxids_to_names(res_df, reportranks, name_dict):
"""
Takes a pandas dataframe with ranks as columns and contigs as rows and taxids as values and translates taxids
to names column by column using a taxid->name dictionary
Parameters
----------
res_df: pandas.DataFrame
Results with taxids
reportranks: list
List of taxonomic ranks to report results for
name_dict: dictionary
Dictionary mapping taxids -> names
Returns
-------
res: pandas.DataFrame
Dataframe with names instead of taxids
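    Examples
    --------
    A minimal, hypothetical illustration (taxids, names and the contig id are
    made up for the example):
    >>> res_df = pd.DataFrame({"superkingdom": [2], "phylum": [976]}, index=["contig1"])
    >>> name_dict = {2: "Bacteria", 976: "Bacteroidetes"}
    >>> translate_taxids_to_names(res_df, ["superkingdom", "phylum"], name_dict).loc["contig1", "phylum"]
    'Bacteroidetes'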
"""
res = {}
for rank in reportranks:
res[rank] = [name_dict[taxid] for taxid in res_df.loc[:,rank]]
res = pd.DataFrame(res)
res.index = res_df.index
res = res.loc[:, reportranks]
return res
def get_thresholds(df, top=10):
"""
    Here bit-score thresholds are calculated per query and returned in a dictionary.
The pandas DataFrame is first sorted by bitscore (high to low), then grouped by query, then for the first entry
per query the top% of the best hit is calculated and converted to dictionary.
Parameters
----------
df: pandas.DataFrame
DataFrame of diamond results
top: int
Percentage range of top bitscore
Returns
-------
thresholds: dict
Dictionary with queries as keys and bitscore thresholds as values
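    Examples
    --------
    A small, hypothetical example indexed by query id; with top=10 the
    threshold is 90% of the best bitscore of each query:
    >>> df = pd.DataFrame({"bitscore": [100.0, 80.0, 50.0]}, index=["q1", "q1", "q1"])
    >>> get_thresholds(df, top=10)
    {'q1': 90.0}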
"""
thresholds = (df.sort_values("bitscore", ascending=False).groupby(level=0).first().bitscore * (
(100 - top)) / 100).to_dict()
return thresholds
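# Hedged usage sketch (illustrative only; the query ids, bitscores and the helper name
# _example_get_thresholds are assumptions, not part of the original module). It shows how
# get_thresholds() derives a per-query bitscore cutoff from the best hit of each query.
def _example_get_thresholds():
    import pandas as pd
    toy = pd.DataFrame({"bitscore": [100.0, 90.0, 50.0]}, index=["q1", "q1", "q2"])
    # With top=10 each query keeps hits within 10% of its best bitscore:
    # q1 -> 100 * 0.9 = 90.0, q2 -> 50 * 0.9 = 45.0
    return get_thresholds(toy, top=10)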
def get_rank_thresholds(ranks, thresholds):
"""
Constructs dictionary of rank-specific thresholds
Parameters
----------
ranks: list
Taxonomic ranks to assign
thresholds: list
Thresholds for taxonomic ranks
Returns
-------
Dictionary of thresholds
"""
t_len, r_len = len(thresholds), len(ranks)
if t_len != r_len:
sys.exit("ERROR: Number of taxonomic ranks ({}) and number of thresholds ({}) differ\n".format(r_len, t_len))
return dict(zip(ranks, thresholds))
def add_names(x, taxid, ncbi_taxa):
"""
This function translates taxonomy ids to names. It operates per-row in the lineage dataframe.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ncbi_taxa: ete3.ncbi_taxonomy.ncbiquery.NCBITaxa
The ete3 sqlite database connection
Returns
-------
The original DataFrame merged with the taxa names
"""
# Get a names dictionary for all taxids in the row
names = ncbi_taxa.get_taxid_translator(list(x.loc[taxid].values) + [taxid])
n = {}
# Iterate ranks
for rank in list(x.columns):
# Get taxid for the current rank
t = x.loc[taxid, rank]
# If taxid is negative it means that there is no classified taxonomy at this rank
# Instead we get the last known name in the hierarchy. We can then use the negative values to translate into
# the name with the "Unclassified." prefix.
# If the name is 'root' we just use 'Unclassified'
if t < 0:
known_name = names[-t]
if known_name == "root":
name = "Unclassified"
else:
name = known_name
# If taxid is positive we just use the name from the dictionary
else:
name = names[t]
# Add name to a dictionary with keys in the form of {rank}.name
n["{}.name".format(rank)] = name
name_df = pd.DataFrame(n, index=[taxid])
return pd.merge(x, name_df, left_index=True, right_index=True)
def propagate_lower(x, taxid, ranks):
"""
Shift known ranks down through the taxonomic hierarchy.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ranks: list
Ranks used for assigning
Returns
-------
pandas.DataFrame updated with missing ranks
Some proteins in the database may map to a taxonomic rank above the lowest taxonomic rank that we are trying to
assign. For instance, if we use the ranks 'superkingdom phylum genus species' and a protein maps to a taxid at
rank phylum then we want to add the taxonomic information at the genus and species levels. This is done here by
adding the negative taxid of the lowest known rank to the lower ranks.
Example:
In the Uniref90 database the entry 'E1GVX1' maps to taxonomy id 838 (rank: genus, name: Prevotella).
When creating the lineage for taxid 838 we add '-838' to rank species.
"""
rev_ranks = [ranks[x] for x in list(range(len(ranks) - 1, -1, -1))]
missing = {}
known = taxid
for rank in rev_ranks[0:]:
if rank not in x.columns:
missing[rank] = -known
else:
known = x.loc[taxid, rank]
return pd.merge(x, pd.DataFrame(missing, index=[taxid]), left_index=True, right_index=True)
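# Hedged usage sketch (illustrative only): taxid 838 (genus Prevotella, as in the docstring
# above) is known down to genus, so the missing 'species' rank is filled with the negative of
# the last known taxid. The lineage values and the helper name are assumptions for illustration.
def _example_propagate_lower():
    import pandas as pd
    ranks = ["superkingdom", "phylum", "genus", "species"]
    x = pd.DataFrame({"superkingdom": [2], "phylum": [976], "genus": [838]}, index=[838])
    # The returned frame gains a 'species' column holding -838
    return propagate_lower(x, 838, ranks)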
def get_lca(r, assignranks, reportranks):
"""
Assign lowest common ancestor from a set of taxids.
Parameters
----------
r: pandas.DataFrame
Results for a single query, extracted from the main diamond results file
assignranks: list
Taxonomic ranks to assign taxonomy for
reportranks: list
Taxonomic ranks to report taxonomy for
Returns
-------
a tuple of dictionaries with ranks as keys and taxa names/ids as values
This function takes a query-slice of the diamond results after filtering by score (and rank-threshold if tango mode
    is 'rank_lca' or 'rank_vote'). It then iterates through each rank in reverse order and checks how many unique
    taxids are found at that rank. If there is only one taxid at a rank, that taxid is taken as the lowest common
    ancestor and its lineage is returned for that rank and all higher ranks.
"""
query = r.index.unique()[0]
# Reverse ranks for iterating
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
# Iterate through the assignranks
for rank in rev_ranks:
higher_ranks = reportranks[0:reportranks.index(rank) + 1]
higher_rank_names = ["{}.name".format(x) for x in higher_ranks]
# Count number of taxa at rank
c = r.groupby(rank).count()
# If there's only one taxa then we have found the LCA
if len(c) == 1:
if len(r) == 1:
lca_taxids = r.loc[query, higher_ranks].values
else:
lca_taxids = r.loc[query, higher_ranks].values[0]
return dict(zip(higher_ranks, lca_taxids))
return {}
def parse_with_rank_thresholds(r, assignranks, reportranks, rank_thresholds, mode, vote_threshold):
"""Assigns taxonomy using rank_specific thresholds
The ranks used to assign taxonomy are iterated in reverse (e.g. species, genus, phylum),
at each rank results are filtered by the corresponding rank threshold,
if no hits remain after filtering the next rank is evaluated,
Then, if mode=='rank_lca', for remaining hits, a lowest common ancestor is calculated from all remaining taxids.
However, if mode=='rank_vote', taxids are counted among the remaining hits and all results matching taxids
that occur more than vote_threshold are used to determine the lowest common ancestor.
If a taxonomy can be assigned at a rank, it is returned directly. If no taxonomy can be assigned at any of the
ranks, empty results are returned.
Parameters
----------
r: pandas.DataFrame
Dataframe slice for a query
assignranks: list
Taxonomic ranks used to assign taxonomy
reportranks: list
Taxonomic ranks at which taxonomy is reported
rank_thresholds: dict
Dictionary of rank_specific thresholds
mode: str
'rank_lca' or 'rank_vote'
vote_threshold: float
Cutoff used to filter out common taxids
Returns
-------
tuple
Dictionaries with taxonomy names and taxonomy ids at each rank
"""
# Start from lowest rank
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
for rank in rev_ranks:
# Make sure that LCA is not set below current rank
allowed_ranks = assignranks[0:assignranks.index(rank) + 1]
# Get rank threshold
threshold = rank_thresholds[rank]
# Filter results by rank threshold
try:
_r = r.loc[r.pident >= threshold]
except KeyError:
continue
if len(_r) == 0:
continue
lca_taxids = {}
# After filtering, either calculate lca from all filtered taxids
if mode == "rank_lca":
lca_taxids = get_lca(_r, allowed_ranks, reportranks)
# Or at each rank, get most common taxid
elif mode == "rank_vote":
vote = get_rank_vote(_r, rank, vote_threshold)
if len(vote) > 0:
lca_taxids = get_lca(vote, allowed_ranks, reportranks)
if len(lca_taxids.keys()) > 0:
return lca_taxids
return {}
def get_rank_vote(r, rank, vote_threshold=0.5):
"""
Filter results based on fraction of taxa
Parameters
----------
r: pandas.DataFrame
Results for a single query, after filtering with bitscore and rank-specific thresholds
rank: str
Current rank being investigated
vote_threshold: float
Required fraction of hits from a single taxa in order to keep taxa
Returns
-------
Filtered dataframe only containing taxa that meet vote_threshold
Here taxa are counted among all hits remaining for a query after filtering using bitscore and rank-specific
thresholds. Taxa are counted at a certain rank and counts are normalized. Hits belonging to taxa above
vote_threshold are kept while others are filtered out.
"""
# Create dataframe for unique taxids filtered at this rank threshold
taxid_counts = pd.DataFrame(dict.fromkeys(r.staxids.unique(), 1), index=["count"]).T
# Add taxid for rank being investigated
rank_df = r.groupby("staxids").first().reset_index()[[rank, "staxids"]].set_index("staxids")
rank_df = pd.merge(taxid_counts, rank_df, left_index=True, right_index=True)
# Sum counts for current rank
rank_sum = rank_df.groupby(rank).sum()
rank_norm = rank_sum.div(rank_sum.sum())
rank_norm = rank_norm.sort_values("count", ascending=False)
votes = rank_norm.loc[rank_norm["count"] > vote_threshold]
if len(votes) > 0:
return r.loc[r[rank].isin(votes.index)]
return []
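# Hedged usage sketch (illustrative only; the taxids and hit layout are made-up assumptions):
# three hits for one query, two of which agree on genus 838. With vote_threshold=0.5 only the
# hits matching genus 838 (2/3 of the hits) survive the vote.
def _example_get_rank_vote():
    import pandas as pd
    r = pd.DataFrame({"staxids": [1001, 1002, 1003], "genus": [838, 838, 165179]},
                     index=["q1", "q1", "q1"])
    return get_rank_vote(r, "genus", vote_threshold=0.5)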
def propagate_taxids(res, ranks):
"""
Transfer taxonomy ids to unassigned ranks based on best known taxonomy
Example:
{'species': -1, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
should become
{'species': -171549, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
Parameters
----------
res: dict
Dictionary of ranks and taxonomy ids
ranks: list
Ranks to assign taxonomy to
Returns
-------
res: dict
Dictionary with updated taxonomy ids
"""
known = -1
for rank in ranks:
# If not -1 (Unclassified) at rank, store assignment as known
if res[rank] != -1:
known = res[rank]
continue
# If -1 at rank (Unclassified), add the taxid with the '-' prefix
if res[rank] == -1:
res[rank] = -abs(known)
return res
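# Hedged usage sketch (illustrative only) mirroring the docstring example above: the
# unassigned species rank (-1) inherits the best known taxid with a '-' prefix.
def _example_propagate_taxids():
    ranks = ["superkingdom", "phylum", "class", "order", "family", "genus", "species"]
    res = {"superkingdom": 2, "phylum": 976, "class": 200643, "order": 171549,
           "family": -171549, "genus": -171549, "species": -1}
    # 'species' becomes -171549 after propagation
    return propagate_taxids(res, ranks)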
def series2df(df):
"""Converts pandas series to pandas dataframe"""
if str(type(df)) == "<class 'pandas.core.series.Series'>":
df = pd.DataFrame(df).T
return df
def read_taxidmap(f, ids):
"""
Reads the protein to taxid map file and stores mappings
Parameters
----------
f: str
Input file with protein_id->taxid map
ids: list
Protein ids to store taxids for
Returns
-------
Dictionary of protein ids to taxid and all unique taxids
"""
taxidmap = dict.fromkeys(ids, -1)
open_function = open
if ".gz" in f:
open_function = gz.open
with open_function(f, 'rt') as fhin:
for line in tqdm.tqdm(fhin, desc="Reading idmap {}".format(f), ncols=100, unit=" lines"):
items = (line.rstrip()).rsplit()
# If file has only two columns, assume taxid in second
if len(items) == 2:
protid, taxid = items
# Otherwise, assume format is same as NCBI protein mapping
else:
protid, taxid = items[0], items[2]
# Add map to dictionary
            # The dictionary is initialized with -1, so adding (taxid + 1) leaves exactly the taxid as the value
# If the protid is not in the dictionary we skip it
try:
taxidmap[protid] += int(taxid) + 1
except KeyError:
continue
except ValueError:
continue
return pd.DataFrame(taxidmap, index=["staxids"]).T, list(set(taxidmap.values()))
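# Hedged usage sketch (illustrative only): a minimal two-column protein -> taxid map written
# to a temporary file. The protein ids and taxids are made-up values; ids requested but absent
# from the file keep the initial -1.
def _example_read_taxidmap():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as fh:
        fh.write("prot1\t562\nprot2\t624\nprot_other\t100\n")
        path = fh.name
    try:
        # prot1 -> 562, prot2 -> 624, prot3 stays -1 (not present in the file)
        taxidmap_df, taxids = read_taxidmap(path, ["prot1", "prot2", "prot3"])
    finally:
        os.remove(path)
    return taxidmap_df, taxids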
def read_df(infile, top=10, e=0.001, input_format="tango", taxidmap=None):
"""
Reads the blast results from file and returns a dictionary with query->results.
Note that the input is assumed to be sorted by bitscore for each query. The first entry for a query is used to set
the score threshold for storing hits for that query. So if a query has a bitscore of 100 and --top 10 is specified
then we only store subsequent hits that have a bitscore of at least (100-0.1*100) = 90.
    Tango-formatted output contains an additional column (the subject taxid) compared to the standard blast format 6:
query1 subject1 93.6 47 3 0 146 6 79 125 8.5e-16 91.3 314295
query1 subject2 100.0 44 0 0 137 6 484 527 2.5e-15 89.7 9347
query2 subject3 53.5 241 84 2 645 7 15 255 1.3e-53 216.9 864142
where the last column is the taxid of the subject.
Otherwise the output may have the typical blast format 6 output.
Parameters
----------
infile: str
        Path to the diamond results file
top: int
Keep results within top% of best bitscore
e: float
Maximum allowed e-value to keep a hit.
input_format: str
Blast format. 'tango' if taxid for each subject is present in blast results, otherwise 'blast'
taxidmap: str
File mapping each subject id to a taxid
Returns
-------
tuple
The function returns a tuple with dictionary of query->results and
unique taxonomy ids (if tango format) or unique subject ids
"""
open_function = open
if ".gz" in infile:
open_function = gz.open
r = {}
taxids = []
queries = {}
with open_function(infile, 'rt') as fhin:
for line in tqdm.tqdm(fhin, desc="Reading {}".format(infile), ncols=100, unit=" lines"):
items = line.rstrip().rsplit()
query, subject, pident, evalue, score = items[0], items[1], float(items[2]), \
float(items[10]), float(items[11])
try:
min_score = queries[query]['min_score']
except KeyError:
min_score = score * ((100 - top) / 100)
queries[query] = {'min_score': min_score}
if score < min_score or evalue > e:
continue
if input_format == "tango" and len(items) > 12:
taxid = items[12]
taxids.append(taxid)
# TODO: Is there a way to skip storing the same taxid from a worse hit for the same query
elif input_format == "blast" and len(items) == 12:
taxid = ""
if not taxidmap:
sys.exit(
"ERROR: Standard blast input detected with no protein -> taxid file specified (--taxidmap).")
else:
continue
            # Add results for query to dictionary; for standard blast input the taxid is
            # resolved later via --taxidmap, so it is only appended here when known
            hit = [subject, pident, evalue, score]
            if taxid != "":
                hit.append(int(taxid))
            try:
                r[query] += [hit]
            except KeyError:
                r[query] = [hit]
# If this is blast format then we return all subject ids found
if input_format == "blast":
ids = list(set([r[key][i][0] for key in list(r.keys()) for i in range(0, len(r[key]))]))
return r, ids
# If this is tango format then return all taxids found
return r, list(set(taxids))
def process_lineages(items):
"""
Looks up lineage information from taxids.
The lineage object is a list of taxonomic ids corresponding to the full lineage of a single taxid.
"""
taxid, ranks, taxdir, dbname, lineage = items
# Read the taxonomy db
ncbi_taxa = init_sqlite_taxdb(taxdir, dbname)
# Get ranks for each taxid in the lineage
lineage_ranks = ncbi_taxa.get_rank(lineage)
x = pd.DataFrame(lineage_ranks, index=["rank"]).T
x = x.loc[x["rank"].isin(ranks)].reset_index().T
x.columns = x.loc["rank"]
x.drop("rank", inplace=True)
x.index = [taxid]
# Add taxids for lower ranks in the hierarchy
x = propagate_lower(x, taxid, ranks)
# Add names for taxids
x = add_names(x, taxid, ncbi_taxa)
return x
def make_name_dict(df, ranks):
"""
Creates a dictionary of taxids to taxonomy names, including Unclassified ranks
Parameters
----------
df: pandas.DataFrame
Lineage dataframe
ranks: list
Ranks to store names information for
Returns
-------
name_dict: dict
Name dictionary mapping taxonomy ids to names
"""
name_dict = {}
for rank in ranks:
name_dict.update(dict(zip(df[rank].values, df["{}.name".format(rank)].values)))
name_dict.update(dict(zip(-abs(df[rank]), "Unclassified." + df["{}.name".format(rank)])))
name_dict[-1] = "Unclassified"
return name_dict
def make_lineage_df(taxids, taxdir, dbname, ranks, cpus=1):
"""
Creates a lineage dataframe with full taxonomic information for a list of taxids.
Example:
taxid species phylum genus genus.name phylum.name species.name
859655 305 1224 48736 Ralstonia Proteobacteria Ralstonia solanacearum
387344 1580 1239 1578 Lactobacillus Firmicutes Lactobacillus brevis
358681 1393 1239 55080 Brevibacillus Firmicutes Brevibacillus brevis
Parameters
----------
taxids: list
List of taxonomic ids to obtain information for
taxdir: str
Path to directory holding taxonomic info
dbname: str
Name of ete3 sqlite database within taxdir
ranks: list
Ranks to store information for
cpus: int
Number of cpus to use
Returns
-------
lineage_df: pandas.DataFrame
Data Frame with full taxonomic info
"""
# Read the taxonomy db
ncbi_taxa = init_sqlite_taxdb(taxdir, dbname)
lineages = ncbi_taxa.get_lineage_translator(taxids)
# Store potential missing taxids and warn user
missing_taxids = set([int(x) for x in taxids]).difference(lineages.keys())
# Get possible translations for taxids that have been changed
_, translate_dict = ncbi_taxa._translate_merged(list(set(taxids).difference(lineages.keys())))
rename = {y: x for x, y in translate_dict.items()}
# Update lineages with missing taxids
lineages.update(ncbi_taxa.get_lineage_translator(translate_dict.values()))
items = [[taxid, ranks, taxdir, dbname, lineages[taxid]] for taxid in list(lineages.keys())]
with Pool(processes=cpus) as pool:
res = list(
tqdm.tqdm(pool.imap(process_lineages, items), desc="Making lineages", total=len(items),
unit=" taxids", ncols=100))
lineage_df = pd.concat(res, sort=False)
lineage_df.rename(index=rename, inplace=True)
lineage_df.rename(index=lambda x: int(x), inplace=True)
for rank in ranks:
lineage_df[rank] = pd.to_numeric(lineage_df[rank])
name_dict = make_name_dict(lineage_df, ranks)
if len(missing_taxids) > 0:
sys.stderr.write("#WARNING: Missing taxids found:\n")
sys.stderr.write("#{}\n".format(",".join([str(x) for x in missing_taxids])))
sys.stderr.write("#To fix this, you can try to update the taxonomy database using\n")
sys.stderr.write("#tango download taxonomy --force\n")
return lineage_df.loc[:,lineage_df.dtypes==int], name_dict
def process_queries(args):
"""Receives a query and its results and assigns taxonomy"""
res_taxids = {}
min_rank_threshold = 0
query, res, rank_thresholds, top, reportranks, assignranks, mode, vote_threshold, lineage_df, taxidmap = args
if len(rank_thresholds) > 0 and "rank" in mode:
min_rank_threshold = min([x for x in rank_thresholds.values()])
columns = ['sseqid', 'pident', 'evalue', 'bitscore']
if len(res[0]) == 5:
columns += ['staxids']
# Create pandas dataframe for slice
res_df = pd.DataFrame(res, columns=columns, index=[query] * len(res))
# Add taxidmap if not present in results
if "staxids" not in res_df.columns:
res_df = pd.merge(res_df, taxidmap, left_on="sseqid", right_index=True, how="left")
# Calculate bit score threshold for slice
thresholds = get_thresholds(res_df, top=top)
# Set index
res_df.index.name = "qseqid"
# Merge with lineage df
res_df = pd.merge(res_df, lineage_df, left_on="staxids", right_index=True, how="left")
# Remove potential nan rows created if the blast results have taxids that are missing from lineage_df
res_df = res_df.loc[res_df[reportranks[0]] == res_df[reportranks[0]]]
# Initialize dictionary
res_taxids[query] = dict.fromkeys(reportranks, -1)
# Handle queries that return pandas Series
res_df = res_df.loc[res_df.bitscore >= thresholds[query]]
res_df = series2df(res_df)
lca_taxids = {}
# Parse with rank thresholds or by just filtering by bitscore
if "rank" in mode:
if len(res_df.loc[res_df.pident >= min_rank_threshold]) > 0:
lca_taxids = parse_with_rank_thresholds(res_df, assignranks, reportranks,
rank_thresholds, mode, vote_threshold)
else:
lca_taxids = get_lca(res_df, assignranks, reportranks)
# Update results with lca_taxids
res_taxids[query].update(lca_taxids)
res_taxids[query] = propagate_taxids(res_taxids[query], reportranks)
return res_taxids[query], query
def write_blobout(f, res_taxids, queries, ranks):
"""
Writes output in a format for use with blobtools
Parameters
----------
f: str
Outputfile
res_taxids: list
List of results for queries
queries: list
List of queries
ranks: list
Ranks to write results for
"""
rev_ranks = [ranks[x] for x in list(range(len(ranks) - 1, -1, -1))]
with open(f, 'w') as fhout:
for i, query in enumerate(queries):
d = res_taxids[i]
for rank in rev_ranks:
if rank in d.keys():
taxid = d[rank]
if taxid != -1:
fhout.write("{query}\t{taxid}\t1\tref\n".format(query=query, taxid=abs(taxid)))
break
def stage_queries(res, lineage_df, input_format="tango", rank_thresholds=[45, 60, 85], top=10, mode="rank_lca",
vote_threshold=0.5, assignranks=["phylum", "genus", "species"],
reportranks=["superkingdom", "phylum", "class", "order", "family", "genus", "species"],
taxidmap=None):
"""
Parameters
----------
res: dict
Dictionary with queries as keys and a list of hits as values
lineage_df: pandas.DataFrame
Data frame of taxids and taxonomic information
input_format: str
'tango' or 'blast'
rank_thresholds: list
List of thresholds for ranks
top: int
Only evaluate results within <top> percent bitscore of best scoring hit
mode: str
'rank_lca' or 'rank_vote' for rank thresholds usage or 'score' to just filter by bitscore
vote_threshold: float
Cutoff used to filter out common taxids
assignranks: list
Ranks used to assign taxonomy
reportranks: list
Ranks to report taxonomy for (inferred from assignranks)
taxidmap: dict
Dictionary with subject ids as keys and taxids as values
Returns
-------
items: list
List of items to send to multiprocessing
"""
items = []
total_queries = len(res)
for q in tqdm.tqdm(sorted(res.keys()), total=total_queries, unit=" queries", ncols=100, desc="Staging queries"):
# If the diamond output does not have standard tango format we do some work to add this information.
item = [q, res[q], rank_thresholds, top, reportranks, assignranks, mode, vote_threshold]
if input_format == "blast":
# Get all subject ids
s = list(set([res[q][i][0] for i in range(0, len(res[q]))]).intersection(lineage_df.index))
# Get all taxids for this query
q_taxids = taxidmap.loc[s, "staxids"].unique()
item += [lineage_df.loc[q_taxids], taxidmap.loc[s]]
# If diamond output has taxonomy id then directly create the list of results to
# feed into the multiprocessing pool
else:
# Get all taxids for query
q_taxids = list(set([res[q][i][-1] for i in range(0, len(res[q]))]).intersection(lineage_df.index))
item += [lineage_df.loc[q_taxids], None]
items.append(item)
return items
def parse_hits(diamond_results, outfile, taxidout=False, blobout=False, top=10, evalue=0.001, input_format="tango",
taxidmap=False, mode="rank_lca", vote_threshold=0.5, assignranks=["phylum", "genus", "species"],
reportranks=["superkingdom", "phylum", "class", "order", "family", "genus", "species"],
rank_thresholds=[45, 60, 85], taxdir="./taxonomy/", sqlitedb="taxonomy.sqlite", chunksize=1, cpus=1):
"""
This is the main function to handle diamond result files and assign taxonomy to queries.
The function performs the following steps:
1. Checks rank-specific thresholds
2. Reads the diamond results file
3. If required, maps subject ids to taxonomy ids
4. Creates a dataframe of all unique taxonomy ids found for subjects and their taxa names for each rank
5. Stages queries for multiprocessing
6. Processes each query and returns it with assigned taxonomy
7. Writes output to file
Parameters
----------
diamond_results: str
Diamond results file
outfile: str
File to write results to
taxidout: str
If True, write results with taxonomic ids instead of names to file
blobout: str
If True, write output in blobtools format
top: int
Evaluate hits within this bitscore percent range of the best scoring hit
evalue: float
Filter hits with evalue larger than this
input_format: str
'tango' or 'blast' depending on whether the diamond results has subject taxids in the last column or not
taxidmap: str
Path to a file mapping subject ids to taxids (needed if input_format != 'tango')
mode: str
How to assign taxonomy: 'rank_lca' and 'rank_vote' use rank specific thresholds,
'score' only filters by bitscore
vote_threshold: float
When using 'rank_vote' to assign taxonomy, this is the fraction of hits that must have the same taxid to
assign a taxonomy at a rank
assignranks: list
Ranks used to assign taxonomy
reportranks: list
Ranks to report taxonomy for
rank_thresholds: list
Percent identity thresholds for assigning taxonomy
taxdir: str
Path to directory holding taxonomic information files
sqlitedb: str
Name of ete3 sqlite database within taxdir
chunksize: int
The size of chunks for the iterable being submitted to the process pool
cpus: int
The number of worker processes to use
args:
Input arguments from __main__.py
Returns
-------
Return code 0 if function finished without issue
"""
# Set up rank thresholds
if "rank" in mode:
rank_thresholds = get_rank_thresholds(assignranks, rank_thresholds)
# Read diamond results
res, ids = read_df(diamond_results, top, evalue, input_format, taxidmap)
# Read protein -> taxidmap file if specified
    if input_format == "blast":
        # taxidmap still holds the file path given to parse_hits at this point;
        # read_taxidmap replaces it with the protein -> taxid DataFrame used downstream
        taxidmap, taxids = read_taxidmap(taxidmap, ids)
    else:
        taxidmap = pd.DataFrame()
        taxids = ids
# Create lineage dataframe
lineage_df, name_dict = make_lineage_df(taxids, taxdir, sqlitedb, reportranks, cpus)
# Set up multiprocessing pool
items = stage_queries(res, lineage_df, input_format, rank_thresholds, top, mode, vote_threshold, assignranks,
reportranks, taxidmap)
total_queries = len(res)
with Pool(processes=cpus) as pool:
assign_res = list(tqdm.tqdm(pool.imap(process_queries, items, chunksize=chunksize), desc="Parsing queries",
total=total_queries, unit=" queries", ncols=100))
# res_tax is the taxonomy table with taxids
res_tax = [item[0] for item in assign_res]
# queries is a list of queries
queries = [item[1] for item in assign_res]
# Create dataframe from taxonomy results
res_df = pd.DataFrame(res_tax, index=queries)[reportranks]
res_df.index.name = "query"
# Writes blobtools-compatible output
if blobout:
sys.stderr.write("Writing blobtools file to {}\n".format(blobout))
        write_blobout(blobout, res_tax, queries, reportranks)
# Write table with taxonomy ids instead of taxon names
if taxidout:
sys.stderr.write("Writing results with taxids to {}\n".format(taxidout))
res_df.to_csv(taxidout, sep="\t")
# Write main output
sys.stderr.write("Translating taxids to names\n")
res_names_df = translate_taxids_to_names(res_df, reportranks, name_dict)
sys.stderr.write("Writing main output to {}\n".format(outfile))
res_names_df.to_csv(outfile, sep="\t")
# Summary stats
unc = [len(res_names_df.loc[res_names_df.loc[:, rank].str.contains("Unclassified")]) for rank in reportranks]
tot = [len(res_names_df)] * len(reportranks)
cl = 100 - np.divide(unc, tot) * 100
cl = ["{}%".format(str(np.round(x, 1))) for x in cl]
    summary = pd.DataFrame(cl, index=reportranks)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from kneed import KneeLocator
from scipy import stats
from pyod.models.cblof import CBLOF
from pyod.models.feature_bagging import FeatureBagging
from pyod.models.hbos import HBOS
from pyod.models.iforest import IForest
from pyod.models.knn import KNN
from pyod.models.lof import LOF
import warnings
warnings.filterwarnings("ignore")
class BadCustomerDetection():
    def __init__(self):
pass
##############################################
#Calculate within-cluster sum of square (wss)#
##############################################
def k_finder(self, cluster_scaled, plot = False, S = 0, curve='convex', direction='decreasing'):
clusters_range = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
sse = self.calculate_WSS(cluster_scaled, clusters_range, plot = plot)
kneedle = KneeLocator(clusters_range, sse, S, curve, direction)
return kneedle.elbow
def calculate_WSS(self, points, clusters_range, plot = False):
sse = []
for k in clusters_range:
kmeans = KMeans(n_clusters = k).fit(points)
centroids = kmeans.cluster_centers_
pred_clusters = kmeans.predict(points)
curr_sse = 0
# calculate square of Euclidean distance of each point from its cluster center and add to current WSS
for i in range(len(points)):
curr_center = centroids[pred_clusters[i]]
curr_sse += (points[i, 0] - curr_center[0]) ** 2 + (points[i, 1] - curr_center[1]) ** 2
sse.append(curr_sse)
if(plot == True):
plt.figure()
plt.plot(clusters_range,sse, marker='o')
return sse
####################################
#Detect Outliers or a group of data#
####################################
#It's multivariate outlier detection, can choose method among 'CBLOF','FB','HBOS','IF','KNN' or 'Voting'
#which will run apply all the models and output the results based on the voting of all the model results
def outlier_detector(self, clustered_data, outliers_fraction = 0.05, method = 'Voting',cluster_number = 3):
random_state = np.random.RandomState(42)
outliers_df = pd.DataFrame()
classifiers = {
#Cluster-based Local Outlier Factor
'CBLOF':CBLOF(contamination=outliers_fraction,check_estimator=False, random_state=random_state),
#Feature Bagging
'FB':FeatureBagging(LOF(n_neighbors=35),contamination=outliers_fraction,check_estimator=False,random_state=random_state),
#Histogram-base Outlier Detection
'HBOS': HBOS(contamination=outliers_fraction),
#Isolation Forest
'IF': IForest(contamination=outliers_fraction,random_state=random_state),
#K Nearest Neighbors
'KNN': KNN(contamination=outliers_fraction)
}
detectors_list = []
for k in range(cluster_number):
curr_cluster = clustered_data[clustered_data['Cluster'] == k]
X_train = curr_cluster.drop(['consumer_id','Cluster'], axis = 1)
for i, (clf_name, clf) in enumerate(classifiers.items()):
clf_pred = clf_name+'_Decision'
clf.fit(X_train)
if(method == 'Voting'):
if(clf_name == 'KNN'):#just save KNN for inference
detectors_list.append(clf)
elif(method !='Voting'):
if(clf_name == method):
detectors_list.append(clf)
# predict raw anomaly score
scores_pred = clf.decision_function(X_train)
scores_pred_df = pd.DataFrame(list(scores_pred), columns =[clf_name], index = curr_cluster.index.copy())
curr_cluster = pd.concat([curr_cluster, scores_pred_df], axis=1)
outliers_pred = clf.predict(X_train)
                outliers_pred_df = pd.DataFrame(list(outliers_pred), columns=[clf_pred], index=curr_cluster.index.copy())
curr_cluster = pd.concat([curr_cluster, outliers_pred_df], axis=1)
outliers_df = outliers_df.append(curr_cluster)
if(method == 'Voting'):
outliers_df['Voting'] = outliers_df.filter(regex='Decision').sum(axis = 1)
outliers_df['bad_customer'] = 0
outliers_df.loc[(outliers_df.Voting > len(classifiers)/2), 'bad_customer'] = 1
else:
decision = method + '_Decision'
outliers_df['bad_customer'] = outliers_df[decision]
return outliers_df, detectors_list
###############################################
#bad customer detector based on the Clustering#
###############################################
    # plot_wss = True will plot the WSS-K plot.
    # S, curve and direction are parameters for finding the elbow (the best K).
    # The sensitivity parameter S adjusts how aggressive Kneedle is when detecting knees: smaller values
    # detect knees quicker, while larger values are more conservative. Put simply, S is a measure of how many
    # "flat" points we expect to see in the unmodified data curve before declaring a knee.
def bad_customer_detector(self, df, log_transformation = True, elbow_finder = False, n_cluster = 3, outliers_fraction = 0.05, method = 'Voting', plot_wss = False, S = 0, curve='convex', direction='decreasing'):
##if elbow = False, need to provide n_cluster
if(log_transformation == True):
cluster_log = np.log(df.drop(['consumer_id'], axis = 1))
else:
cluster_log = df.drop(['consumer_id'], axis = 1)
scaler = StandardScaler()
cluster_scaled = scaler.fit_transform(cluster_log)
##Start to test cluster numbers
if(elbow_finder == True):
K_elbow = self.k_finder(cluster_scaled, plot_wss, S, curve, direction)
            if K_elbow is None:
                print('Need to provide a number')
                cluster_number = n_cluster
            else:
                cluster_number = K_elbow
else:
cluster_number = n_cluster
#clustering starts from here
kmeans_sel = KMeans(n_clusters=cluster_number, random_state=42).fit(cluster_scaled)
        labels = pd.DataFrame(kmeans_sel.labels_)
'''
Created on Jul 16, 2019
@author: vincentiusmartin
'''
import pandas as pd
from sitesfinder.imads import iMADS
from sitesfinder.imadsmodel import iMADSModel
from sitesfinder.plotcombiner import PlotCombiner
from sitesfinder.pbmescore import PBMEscore
from sitesfinder.sequence import Sequence
from sitesfinder.prediction.basepred import BasePrediction
import pickle
from cooperative import coopfilter
def main():
curdir = "/Users/vincentiusmartin/Research/chip2gcPBM/"
analysis_path = curdir + "result/ets1_HepG2/analysis_result/"
with open(analysis_path + "sitefiles_list.txt", 'r') as f:
sitelist = [line.strip() for line in f.readlines()]
slist = "/Users/vincentiusmartin/Research/chip2gcPBM/chip2probe/../result/ets1_A549/analysis_result/sites_within_d3_span50.tsv"
    seqdf = pd.read_csv(slist, sep='\t')
import pkg_resources
import pandas as pd
from unittest.mock import sentinel
import osmo_jupyter.dataset.parse as module
def test_parses_ysi_csv_correctly(tmpdir):
test_ysi_classic_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_ysi_classic.csv"
)
formatted_ysi_data = module.parse_ysi_proodo_file(test_ysi_classic_file_path)
expected_ysi_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"YSI barometric pressure (mmHg)": 750,
"YSI DO (% sat)": 19,
"YSI temperature (C)": 24.7,
"YSI unit ID": "unit ID",
}
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_ysi_data, expected_ysi_data)
def test_parses_ysi_kordss_correctly(tmpdir):
test_ysi_kordss_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_ysi_kordss.csv"
)
formatted_ysi_data = module.parse_ysi_prosolo_file(test_ysi_kordss_file_path)
expected_ysi_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"YSI barometric pressure (mmHg)": 750,
"YSI DO (% sat)": 60,
"YSI DO (mg/L)": 6,
"YSI temperature (C)": 24.7,
}
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_ysi_data, expected_ysi_data)
def test_parses_picolog_csv_correctly():
test_picolog_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
formatted_picolog_data = module.parse_picolog_file(test_picolog_file_path)
expected_picolog_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"PicoLog temperature (C)": 39,
"PicoLog barometric pressure (mmHg)": 750,
},
{
"timestamp": pd.to_datetime("2019-01-01 00:00:02"),
"PicoLog temperature (C)": 40,
"PicoLog barometric pressure (mmHg)": 750,
},
{
"timestamp": pd.to_datetime("2019-01-01 00:00:04"),
"PicoLog temperature (C)": 40,
"PicoLog barometric pressure (mmHg)": 750,
},
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_picolog_data, expected_picolog_data)
def test_parses_calibration_log_correctly():
test_calibration_log_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
formatted_calibration_log_data = module.parse_calibration_log_file(
test_calibration_log_file_path
)
# Nothing is supposed to be renamed or dropped, just datetime formatting
expected_calibration_log_index = pd.DatetimeIndex(
[
pd.to_datetime("2019-01-01 00:00:00"),
pd.to_datetime("2019-01-01 00:00:01"),
pd.to_datetime("2019-01-01 00:00:03"),
pd.to_datetime("2019-01-01 00:00:04"),
],
name="timestamp",
)
pd.testing.assert_index_equal(
formatted_calibration_log_data.index, expected_calibration_log_index
)
class TestParseDataCollectionLog:
def test_parses_data_collection_log_correctly(self):
test_log_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_data_collection_log.xlsx"
)
actual_data_collection_log = module.parse_data_collection_log(
test_log_file_path
)
expected_data_collection_log = pd.DataFrame(
[
{
"experiment_names": [
"2019-07-26--19-34-38-Pi2E32-3000_images_attempt_1"
],
"drive_directory": "2019-07-26 Collect 3000 images (attempt 1)",
"pond": "calibration",
"cosmobot_id": "A",
"cartridge_id": "C00003",
"start_date": pd.to_datetime("2019-07-26 19:12"),
"end_date": pd.to_datetime("2019-07-28 13:55"),
},
{
"experiment_names": [
"2019-08-26--23-34-10-PiE5FB-scum_tank_shakedown"
],
"drive_directory": "2019-08-26 Scum Tank Shakedown",
"pond": "scum tank 1",
"cosmobot_id": "B",
"cartridge_id": "C00005",
"start_date": pd.to_datetime("2019-08-26 23:35"),
"end_date": pd.to_datetime("2019-08-27 08:15"),
},
]
)
pd.testing.assert_frame_equal(
actual_data_collection_log, expected_data_collection_log
)
def test_get_attempt_summary_gets_multiple_buckets(self):
test_attempt_data = pd.Series(
{
"S3 Bucket(s)": "1\n2\n3",
"Drive Directory": "Experiment",
"Cosmobot ID": "Z",
"Cartridge": "C1",
"Start Date/Time": pd.to_datetime("2019"),
"End Date/Time": pd.to_datetime("2020"),
}
)
actual_attempt_summary = module._get_attempt_summary(test_attempt_data)
expected_attempt_summary = pd.Series(
{
"experiment_names": ["1", "2", "3"],
"drive_directory": "Experiment",
"pond": "calibration",
"cosmobot_id": "Z",
"cartridge_id": "C1",
"start_date": pd.to_datetime("2019"),
"end_date": pd.to_datetime("2020"),
}
)
pd.testing.assert_series_equal(actual_attempt_summary, expected_attempt_summary)
def test_interpolates_calibration_log_data_correctly(mocker):
mock_calibration_log_data = pd.DataFrame(
{
"timestamp": [
pd.to_datetime("2019-01-01 00:00:00"),
pd.to_datetime("2019-01-01 00:00:02"),
],
"YSI DO (mmHg)": [50, 30],
"setpoint temperature (C)": [29.00000001, 30.0000003],
"setpoint O2 fraction": [0.100000001, 0.3],
"extraneous column": ["is", "dropped"],
}
).set_index("timestamp")
mocker.patch.object(
module, "parse_calibration_log_file", return_value=mock_calibration_log_data
)
transformed_log_data = module.process_calibration_log_file(sentinel.log_file_name)
expected_log_data = pd.DataFrame(
{
"timestamp": [
pd.to_datetime("2019-01-01 00:00:00"),
pd.to_datetime("2019-01-01 00:00:01"),
pd.to_datetime("2019-01-01 00:00:02"),
],
"YSI DO (mmHg)": [50, 40, 30],
"setpoint temperature (C)": [29, 29.5, 30],
"setpoint O2 fraction": [0.1, 0.2, 0.3],
},
dtype="float64",
).set_index("timestamp")
pd.testing.assert_frame_equal(
transformed_log_data, expected_log_data, check_less_precise=6
)
def test_setpoint_ids_assigned_correctly():
    mock_image_data = pd.DataFrame(
{
"timestamp": [
# Each > than 5 minute jump should increment setpoint ID
pd.to_datetime("2019-01-01 00:00:00"),
pd.to_datetime("2019-01-01 00:05:00"), # Don't increment
pd.to_datetime("2019-01-01 00:10:01"), # Increment
pd.to_datetime("2019-01-01 00:16:00"), # Increment
],
"image": [
sentinel.image1,
sentinel.image2,
sentinel.image3,
sentinel.image4,
],
}
).set_index("timestamp")
    setpoint_ids = module.generate_time_based_setpoint_ids(mock_image_data)
    expected_setpoint_ids = pd.Series([0, 0, 1, 2], index=mock_image_data.index).rename(
"timestamp"
)
pd.testing.assert_series_equal(setpoint_ids, expected_setpoint_ids)
def test_prepare_ysi_data():
    # After the parse_* functions, the two types of YSI data look mostly the same,
# so just test the common functionality here.
# Only difference is ProSolo also includes mg/L values.
mock_ysi_data = pd.DataFrame(
{
"timestamp": [
pd.to_datetime("2019-01-01 00:00:00"),
                pd.to_datetime("2019-01-01 00:00:02")
# -*- coding: utf-8 -*-
import unittest
import pandas
from pipesnake.pipe import SeriesPipe
from pipesnake.transformers.imputer import KnnImputer
from pipesnake.transformers.imputer import ReplaceImputer
from pipesnake.transformers.selector import ColumnSelector
class TestImputer(unittest.TestCase):
def test_replace_imputer(self):
from _data import x_nan
from _data import y_nan
pipe = SeriesPipe(transformers=[
ColumnSelector(x_cols=['x_1', 'x_2', 'x_3', 'x_4'], y_cols=['y_1', 'y_2']),
ReplaceImputer(x_cols='all', y_cols='all'),
])
x_new, y_new = pipe.fit_transform(x_nan, y_nan)
self.assertEqual(pandas.isnull(x_new).any().any(), False, 'NaN values has been found in x_new')
        self.assertEqual(pandas.isnull(y_new).any().any(), False, 'NaN values has been found in y_new')
#GiG
import numpy as np
import pandas as pd
from pathlib import Path
from deep_blocker import DeepBlocker
from tuple_embedding_models import AutoEncoderTupleEmbedding, CTTTupleEmbedding, HybridTupleEmbedding, SIFEmbedding
from vector_pairing_models import ExactTopKVectorPairing
import blocking_utils
from configurations import *
import pickle
def ctt_train_score_with_pred(folder_root, golden_set,left_table_fname, right_table_fname, cols_to_block, tuple_embedding_model, vector_pairing_model):
"""
Inputs table names, model type, ground truth labels.
Outputs evaluation statistics with binary classifier, mainly for CTT
"""
folder_root = Path(folder_root)
left_table_name_csv = left_table_fname+'.csv'
right_table_name_csv = right_table_fname+'.csv'
left_df = pd.read_csv(folder_root / left_table_name_csv)
right_df = pd.read_csv(folder_root / right_table_name_csv)
db = DeepBlocker(tuple_embedding_model, vector_pairing_model)
candidate_set_df,predictions = db.driver(left_df, right_df, cols_to_block,True)
    predictions = pd.DataFrame(predictions, columns=['ltable_id', 'rtable_id', 'value'])
# Arithmetic Operators
num1 = 10
num2 = 20
print(num1 + num2)
print(num1 - num2)
print(num1 * num2)
print(num1 / num2)
print("END")
print()
# RELATIONAL OPERATIONS
print(num1 < num2)
print(num1 > num2)
print(num1 == num2)
print(num1 != num2)
print("END")
print()
# LOGICAL OPERATIONS
log1 = True
log2 = False
print("END")
print()
# &
print(log1 & log2)
print(log2 & log1)
print(log2 & log2)
print(log1 & log1)
print("END")
print()
# |
print(log1 | log2)
print(log2 | log1)
print(log2 | log2)
print(log1 | log1)
print("END")
print()
# strings
my_string = "My name is Jhon"
print(my_string[0])
my_string = "My name is Jhon"
print(my_string[-1])
print("END")
print()
print(len(my_string))
print(my_string.lower())
print(my_string.upper())
print(my_string.replace('Jhon', 'Tom'))
print(my_string.count('is'))
print("END")
print()
# strings
fruit = "I like apples, mangoes, bananas"
print(fruit)
print(fruit.split(' '))
print("END")
print()
# lists
l1 = [1, "a", True]
print(l1)
print(type(l1))
print("END")
print()
# lists
l1 = [1, 'a', 2, 'b', 3, 'c']
print(l1)
print(type(l1))
print(l1[2:5])
print("END")
print()
# lists
l1 = ["apples", "mangoes", "bananas"]
print(l1)
l1[0] = "Fruit"
print(l1)
l1.append("Sparta")
print(l1)
l1.pop()
print(l1)
l1.reverse()
print(l1)
l1.insert(1, "Sparta")
print(l1)
l1.sort()
print(l1)
print("END")
print()
# lists
l1 = [1, 2, 3]
l2 = ["a", "b", "c"]
print(l1 + l2)
print("END")
print()
l1 = [1, "a", True]
print(l1 * 3)
print("END")
print()
# Tuple
tup1 = (1, "Sparta", True)
print(tup1)
print(type(tup1))
tup2 = (1, "a", True, 2, "b", False)
print(tup2)
# to find the length of the tuple
print(len(tup2))
# to add or multiply or do both at the same time
print(tup1 + tup2)
print()
print(tup1 * 3)
print()
print(tup1 * 3 + tup2)
print()
# to find out the maximum and minimum values of a tuple
tup1 = (1, 2, 3, 4, 5, 6)
print(min(tup1))
print(max(tup1))
print("END")
print()
# Dictionary
Fruits = {"Mango": 10, "Apple": 20, "Litchi": 30, "Blueberry": 40}
print(Fruits)
print(type(Fruits))
# separating keys and values
print(Fruits.keys())
print(Fruits.values())
print()
# adding or changing an existing element
Fruits["Banana"] = 50
print(Fruits)
Fruits["Apple"] = 60
print(Fruits)
print()
# adding two dictionaries
fruit1 = {"Mango": 100, "Apple": 200}
fruit2 = {"Litchi": 300, "Blueberry": 400}
fruit1.update(fruit2)
print(fruit1)
# removing an element from a dictionary
fruit1.pop("Blueberry")
print(fruit1)
print()
# Set operations
s1 = {1, "a", True, 2, "b", False}
print(s1)
# to add one single element
s1.add("Hello")
print(s1)
# to add multiple elements
s1.update([10, 20, 30])
print(s1)
print()
# to remove elements
s1.remove(False)
print(s1)
print()
# for union of two sets
t1 = {1, 2, 3}
t2 = {4, 5, 6}
print(t1.union(t2))
j1 = {1, 2, 3, 4, 5}
j2 = {5, 6, 7, 1, 3}
print(j1.intersection(j2))
print()
# If statement
a = 10
b = 20
if a > b:
print("a is greater than b")
else:
print("b is greater than a")
print()
# elif
a = 10
b = 20
c = 30
if (a > b) & (a > c):
print("a is the greatest")
if (b > a) & (b > c):
print("b is the greatest")
if (c > a) & (c > b):
print("c is the greatest")
print()
# tuple with if
tup1 = (1, 2, 3, 4, 5)
if "a" in tup1:
print("a is present in tup1")
else:
print("a is not present in tup1")
print()
# if with lists
l1 = ["a", "b", "c"]
if l1[0] == "a":
l1[0] = "z"
print(l1)
print()
# if with dictionary
d1 = {"k1": 10, "k2": 20, "k3": 30}
if d1["k1"] == 10:
d1["k1"] = d1["k1"] + 100
print(d1)
# looping statements (simple for loop)
fruits = ["apple", "mango", "banana"]
for i in fruits:
print(i)
print()
# nested for loop
color = ["blue", "black", "green", "yellow"]
item = ["book", "pencil", "ball", "chair"]
for i in color:
for j in item:
print(i, j)
print()
# Printing 1-10 while using loop
i = 1
while i <= 10:
print(i)
i = i + 1
print()
# printing 2-table with while loop
i = 1
n = 2
while i <= 10:
print(n, " * ", i, " = ", n * i)
i = i + 1
print()
# class
class Phone:
def make_call(self):
print("Making phone call")
def play_game(self):
print("Playing game")
p1 = Phone()
p1.make_call()
print()
p1.play_game()
print()
# adding parameters to the class
class Phone:
def set_color(self, color):
self.color = color
def set_cost(self, cost):
self.cost = cost
def show_color(self):
return self.color
def show_cost(self):
return self.cost
def make_call(self):
print("Making phone call")
def play_game(self):
print("Playing game")
p1 = Phone()
p1.set_color("blue")
p1.set_cost(999)
print(p1.show_color())
print(p1.show_cost())
print()
# class with a constructor
class Employee:
def __init__(self, name, age, salary, gender):
self.name = name
self.age = age
self.salary = salary
self.gender = gender
def employee_details(self):
print("Name of the employee is ", self.name)
print("Age of the employee is ", self.age)
print("Salary of the employee is ", self.salary)
print("Gender of the employee is ", self.gender)
# constructor example
e1 = Employee('Sam', 32, 85000, 'Male')
print(e1.employee_details())
# Inheritance (PARENT CLASS)
class Vehicle:
def __init__(self, mileage, cost):
self.mileage = mileage
self.cost = cost
def show_details(self):
print("I am a Vehicle ")
print("Mileage of the Vehicle is ", self.mileage)
print("Cost of the Vehicle is ", self.cost)
v1 = Vehicle(500, 500)
print(v1.show_details())
# Inheritance (CHILD CLASS
class Car(Vehicle):
def show_car(self):
print("I am a car")
c1 = Car(200, 1200)
print(c1.show_details())
print(c1.show_car())
# Inheritance (SUPER Class)
class Car(Vehicle):
def __init__(self, mileage, cost, tyres, hp):
super().__init__(mileage, cost)
self.tyres = tyres
self.hp = hp
def show_car_details(self):
print("I am a Car ")
print("Number of tyres are ", self.tyres)
print("Value of horse power is ", self.hp)
c1 = Car(20, 12000, 4, 300)
print(c1.show_details())
print(c1.show_car_details())
print()
# inheritance (MULTIPLE)
class Parent1():
def assign_string_one(self, str1):
self.str1 = str1
def show_string_one(self):
return self.str1
class Parent2():
def assign_string_two(self, str2):
self.str2 = str2
def show_string_two(self):
return self.str2
class Derived(Parent1, Parent2):
def assign_string_three(self, str3):
self.str3 = str3
def show_string_three(self):
return self.str3
d1 = Derived()
d1.assign_string_one("one")
d1.assign_string_two("Two")
d1.assign_string_three("Three")
print(d1.show_string_one())
print(d1.show_string_two())
print(d1.show_string_three())
print()
# inheritance (MULTI lvl)
class Parent():
def assign_name(self, name):
self.name = name
def show_name(self):
return self.name
class Child(Parent):
def assign_age(self, age):
self.age = age
def show_age(self):
return self.age
class GrandChild(Child):
def assign_gender(self, gender):
self.gender = gender
def show_gender(self):
        return self.gender
gc = GrandChild()
gc.assign_name("Bob")
gc.assign_age("52")
gc.assign_gender("Male")
print(gc.show_name())
print(gc.show_age())
print(gc.show_gender())
print()
# NumPy (SINGLE DIMENSIONAL ARRAY)
import numpy as np
n1 = np.array([10, 20, 30, 40])
print(n1)
print()
print(type(n1))
print()
import numpy as np
n2 = np.array([[10, 20, 30, 40], [40, 30, 20, 10]])
print(n2)
print()
# numpy with zeros
import numpy as np
n1 = np.zeros((1, 2))
print(n1)
print()
import numpy as np
n1 = np.zeros((5, 5))
print(n1)
print()
# NumPy with same number
import numpy as np
n1 = np.full((2, 2), 10)
print(n1)
print()
# NumPy with arrange
import numpy as np
n1 = np.arange(10, 20)
print(n1)
print()
import numpy as np
n1 = np.arange(10, 50, 5)
print(n1)
print()
# NumPy with random numbers
import numpy as np
n1 = np.random.randint(1, 100, 5)
print(n1)
print()
# NumPy with shapes
import numpy as np
n1 = np.array([[1, 2, 3], [4, 5, 6]])
print(n1.shape)
print()
n1.shape = (3, 2)
print(n1)
print()
# Joining NumPy
import numpy as np
n1 = np.array([10, 20, 30])
n2 = np.array([40, 50, 60])
print(np.vstack((n1, n2)))
print()
print(np.hstack((n1, n2)))
print()
print(np.column_stack((n1, n2)))
print()
import numpy as np
n1 = np.array([10, 20, 30, 40, 50])
n2 = np.array([40, 50, 60, 70, 80])
print(np.intersect1d(n1, n2))
print()
print(np.setdiff1d(n1, n2))
print()
print(np.setdiff1d(n2, n1))
print()
# addition in NumPy (BASIC)
import numpy as np
n1 = np.array([10, 20, 30])
n1 = n1 + 1
print(n1)
print()
import numpy as np
n1 = np.array([10, 20, 30])
n1 = n1 - 1
print(n1)
print()
import numpy as np
n1 = np.array([10, 20, 30])
n1 = n1 * 2
print(n1)
print()
import numpy as np
n1 = np.array([10, 20, 30])
n1 = n1 / 2
print(n1)
print()
# MEAN
import numpy as np
n1 = np.array([10, 20, 30, 40, 50, 60])
print(np.mean(n1))
print()
# median
import numpy as np
n1 = np.array([11, 44, 5, 96, 67, 85])
print(np.median(n1))
print()
# standard deviation
import numpy as np
n1 = np.array([1, 5, 3, 100, 4, 48])
print(np.std(n1))
print()
# NumPy Matrix
import numpy as np
n1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(n1)
print()
print(n1[0])
print()
print(n1[1])
print()
print(n1[:, 1])
print()
print(n1[:, 2])
print()
# numpy matrix transpose
n1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(n1)
print()
print(n1.transpose())
print()
# numpy matrix multiplication
n1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(n1)
print()
n2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
print(n2)
print(n1.dot(n2))
print()
print(n2.dot(n1))
print()
# NumPy Save & Load
import numpy as np
n1 = np.array([10, 20, 30, 40, 50, 60])
np.save('my_numpy', n1)
n2 = np.load('my_numpy.npy')
print(n2)
print()
# Python Pandas one dimensional labels
import pandas as pd
s1 = pd.Series([1, 2, 3, 4, 5])
print(s1)
print()
print(type(s1))
print()
# changing index
import pandas as pd
s1 = pd.Series([1, 2, 3, 4, 5], index=["a", "b", "c", "d", "e"])
print(s1)
print()
# series objects from dictionary
import pandas as pd
s1 = pd.Series({'a': 10, 'b': 20, 'c': 30})
print(s1)
print()
# changing index
import pandas as pd
print(pd.Series(s1, index=['b', 'c', 'd', 'a']))
print()
# extracting individual elements
s1 = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
print(s1[3])
print()
print(s1[:4])
print()
print(s1[-3:])
print()
# adding scalar value to the series elements
print(s1 + 5)
print()
# adding two series elements
s1 = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
s2 = pd.Series([10, 20, 30, 40, 50, 60, 70, 80, 90])
print(s1 + s2)
print()
# Pandas data frame
import pandas as pd
df = pd.DataFrame({"Name": ['Bob', 'Sam', 'Anne'], "Marks": [76, 25, 92]})
print(df)
print()
print(type(df))
# reading a csv file into a DataFrame
iris = pd.read_csv('Book1.csv')
"""A module to help perform analyses on various observatioanl studies.
This module was implemented following studies of M249, Book 1.
Dependencies:
- **scipy**
- **statsmodels**
- **pandas**
- **numpy**
"""
from __future__ import annotations as _annotations
import math as _math
from scipy import stats as _st
from statsmodels.stats import contingency_tables as _tables
import pandas as _pd
import numpy as _np
def riskratio(obs: _np.ndarray, alpha: float = 0.05) -> _pd.DataFrame:
"""Return the point and (1-alpha)% confidence interval estimates for
the relative risk.
Args:
alpha: Significance level for the confidence interval.
Returns:
Point and (1-alpha)% confidence interval estimates for\
the relative risk.
"""
z: float = _st.norm().ppf(1-alpha/2)
a = obs[0, 1]
n1: int = _np.sum(obs[0])
df = _pd.DataFrame(index=["riskratio", "stderr", "lower", "upper"])
# add the reference category results
df["Exposed1 (-)"] = [1.0, 0.0, "NA", "NA"]
# gather results from array
for i in range(1, obs.shape[0]):
# get exposure results
c = obs[i, 1]
n2: int = _np.sum(obs[i])
# calculate the risk ratio
rr: float = (c/n2) / (a/n1)
stderr: float = _math.sqrt((1/a - 1/n1) + (1/c - 1/n2))
ci: tuple[float, float] = (
rr * _math.exp(-z * stderr), rr * _math.exp(z * stderr)
)
# append to df
df[f"Exposed{i+1} (+)"] = [rr, stderr, ci[0], ci[1]]
return df.T
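# Hedged usage sketch (illustrative only; the counts are made up). Rows are exposure groups
# and column 1 holds the cases, matching how riskratio() reads the table, so the exposed
# group below carries twice the baseline risk (point estimate 2.0).
def _example_riskratio():
    import numpy as np
    obs = np.array([[90, 10],   # unexposed: 10 cases out of 100
                    [80, 20]])  # exposed:   20 cases out of 100
    return riskratio(obs)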
def oddsratio(obs: _np.ndarray, alpha: float = 0.05) -> _pd.DataFrame:
"""Return the point and (1-alpha)% confidence interval estimates for
the odds ratio.
Args:
alpha: Significance level for the confidence interval.
Returns:
Point and (1-alpha)% confidence interval estimates for\
the odds ratio.
"""
# gather results
z: float = _st.norm().ppf(1-alpha/2)
a: float = obs[0, 0]
b: float = obs[0, 1]
df = _pd.DataFrame(index=["oddsratio", "stderr", "lower", "upper"])
# add the reference category results
df["Exposed1 (-)"] = [1.0, 0.0, "NA", "NA"]
# gather results from array
for i in range(1, obs.shape[0]):
# get exposure results
c: float = obs[i, 0]
d: float = obs[i, 1]
# calculate the odds ratio
or_: float = (a * d) / (b * c)
stderr: float = _math.sqrt(1/a + 1/b + 1/c + 1/d)
ci: tuple[float, float] =(
or_ * _math.exp(-z * stderr), or_ * _math.exp(z * stderr)
)
# append to df
df[f"Exposed{i+1} (+)"] = [or_, stderr, ci[0], ci[1]]
return df.T
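# Hedged usage sketch (illustrative only; the counts are made up). With the cell layout used
# by oddsratio() the point estimate for the exposed group is (90 * 40) / (10 * 60) = 6.0.
def _example_oddsratio():
    import numpy as np
    obs = np.array([[90, 10],   # reference group
                    [60, 40]])  # exposed group
    return oddsratio(obs)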
def expectedfreq(obs: _np.ndarray) -> _np.ndarray:
"""Return the expected frequencies from a contingency table under
the hypothesis of no association.
Returns:
Expected frequencies.
"""
return _st.contingency.expected_freq(obs)
def chisqcontribs(obs: _np.ndarray) -> _np.ndarray:
"""Return the chi-squared contributions for each observation used in a
chi-squared test of no association.
Returns:
chi-squared contributions.
"""
exp = expectedfreq(obs)
contribs = _np.divide(_np.square(obs-exp), exp)
return contribs
def chisqtest( obs: _np.ndarray) -> _pd.DataFrame:
"""Return the results of a chi-squared test of no association.
Returns:
Results of a chi-squared test of no association.
"""
res = _st.chi2_contingency(obs, correction=False)
df = _pd.DataFrame(index=["chisq", "pval", "df"])
df["result"] = [res[0], res[1], res[2]]
return df.T
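# Hedged usage sketch (illustrative only; the counts are made up): a single 2x2 table, so the
# test of no association has one degree of freedom. chisqcontribs() can be inspected alongside
# it to see which cells drive the statistic.
def _example_chisqtest():
    import numpy as np
    obs = np.array([[90, 10],
                    [60, 40]])
    return chisqtest(obs)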
def aggregate(obs) -> _np.ndarray:
"""Return an aggregated array.
"""
agg: _np.ndarray = _np.empty((2, 2))
for table in obs:
agg += table
return agg
def adjusted_oddsratio(obs: _np.ndarray, alpha: float = 0.05) -> _pd.DataFrame:
"""Return the point and (1-alpha)% confidence interval estimates for
the adjusted odds ratio.
It uses the Mantel-Haenszel odds ratio.
Args:
alpha: Significance level for the confidence interval.
Returns:
Point and (1-alpha)% confidence interval estimates for\
the adjusted odds ratio.
"""
strattable = _tables.StratifiedTable(obs.tolist())
est = strattable.oddsratio_pooled
stderr = strattable.logodds_pooled_se
ci = strattable.oddsratio_pooled_confint(alpha)
"""
elif isinstance(table, OneToOneMatched):
est = table.table[0][1] /table.table[1][0]
se = (1/table.table[0][1] + 1/table.table[1][0]) ** 0.5
zscore = _st.norm.ppf(1-alpha/2)
ci = (est * _exp(-zscore * se), est * _exp(zscore * se))
else:
"Not defined for table type."
"""
df = _pd.DataFrame(index=["oddsratio", "stderr", "lower", "upper"])
df["result"] = [est, stderr, ci[0], ci[1]]
return df.T
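# Hedged usage sketch (illustrative only; both strata are made-up 2x2 tables laid out the same
# way as those passed to oddsratio() above). The pooled Mantel-Haenszel estimate adjusts the
# odds ratio for the stratifying variable.
def _example_adjusted_oddsratio():
    import numpy as np
    obs = np.array([[[90, 10],
                     [60, 40]],
                    [[80, 20],
                     [50, 50]]])
    return adjusted_oddsratio(obs)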
def crude_oddsratio(obs: _np.ndarray, alpha: float = 0.05) -> _pd.DataFrame:
"""Return the point and (1-alpha)% confidence interval estimates for
the crude odds ratio.
Args:
alpha: Significance level for the confidence interval.
Returns:
Point and (1-alpha)% confidence interval estimates for\
the crude odds ratio.
"""
return oddsratio(aggregate(obs), alpha)
def test_equalodds(obs: _np.ndarray) -> _pd.DataFrame:
"""Return the results test of the null hypothesis that the odds
ratio is the same in all _k_ strata.
This is the Tarone test.
Args:
adjust: If true, use the Tarone adjustment to achieve the chi^2\
asymptotic distribution.
Returns:
test statistic and the p-value.
"""
strattable = _tables.StratifiedTable(obs.tolist())
res = strattable.test_equal_odds(True)
    df = _pd.DataFrame(index=["chisq", "pval"])
import pandas as pd
import numpy as np
import nltk
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import RegexpTokenizer
##### Read data for 5 sample positions ###########
req_ids = ["e3625ad", "39ee3f", "45de815"
,"40a2c38", "63146c6"]
# read raw text data for embeddings
job_text = pd.read_csv("data/cleaned_job.csv", index_col=0)
resume_text = pd.read_csv("data/cleaned_resume.csv", index_col=0)
# read structured
job_features = pd.read_csv("Resume-Parser-JOBS/data/output/job_description_summary.csv")
resume_features = pd.read_csv("data/resumes_5jobs.csv")
# keep only the relevant positions and candidates
job_text = job_text[job_text["Req ID"].isin(req_ids)]
resume_text = resume_text[resume_text["Req ID"].isin(req_ids)]
job_features = job_features[job_features.ReqID.isin(req_ids)]
resume_features = resume_features[resume_features.ReqID.isin(req_ids)]
##### one hot encode the structured features #####
### for jobs ###
# drop unused columns
drop_cols = ['GPA', 'courses', 'hobbies', 'email'
, 'phone','Education', 'Extracurriculars'
,'Language', 'Work', 'Summaries', 'Skill'
, 'Member', 'Writing', 'Researching'
, 'Honor', 'Activity']
job_features.drop(drop_cols, inplace=True, axis=1)
df = job_features
hot = df[['ReqID']]
#honor_societies
df.honor_societies.fillna('', inplace=True)
hot['HonorSociety'] = df.honor_societies.apply(lambda x: 1 if len(x) > 2 else 0)
#latin_honors
df.latin_honors.fillna('', inplace=True)
hot['LatinHonors'] = df.latin_honors.apply(lambda x: 1 if len(x) > 2 else 0)
#scholarships_awards
df.scholarships_awards.fillna('', inplace=True)
hot['ScholarshipsAward'] = df.scholarships_awards.apply(lambda x: 1 if len(x) > 2 else 0)
#schools
df.community_college.fillna('', inplace=True)
hot['CommCollege'] = df.community_college.apply(lambda x: 1 if len(x) > 2 else 0)
df.other_universities.fillna('', inplace=True)
hot['OtherUni'] = df.other_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_100_universities.fillna('', inplace=True)
hot['Top100Uni'] = df.top_100_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_10_universities.fillna('', inplace=True)
hot['Top10Uni'] = df.top_10_universities.apply(lambda x: 1 if len(x) > 2 else 0)
#degrees
df.associate_education_level.fillna('', inplace=True)
hot['Associates'] = df.associate_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.bachelor_education_level.fillna('', inplace=True)
hot['Bachelors'] = df.bachelor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.master_education_level.fillna('', inplace=True)
hot['Masters'] = df.master_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.doctor_education_level.fillna('', inplace=True)
hot['Doctors'] = df.doctor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
#companies
df.company_foodbev.fillna('', inplace=True)
hot['FoodBev'] = df.company_foodbev.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_consumer.fillna('', inplace=True)
hot['Consumer'] = df.company_consumer.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_energychem.fillna('', inplace=True)
hot['EnergyChem'] = df.company_energychem.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_fin.fillna('', inplace=True)
hot['Fin'] = df.company_fin.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_health.fillna('', inplace=True)
hot['HealthMed'] = df.company_health.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_industrial.fillna('', inplace=True)
hot['Industrial'] = df.company_industrial.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_tech.fillna('', inplace=True)
hot['Tech'] = df.company_tech.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_services.fillna('', inplace=True)
hot['Services'] = df.company_services.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_other.fillna('', inplace=True)
hot['OtherCo'] = df.company_other.apply(lambda x: 1 if len(x) > 2 else 0)
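# --- Hedged refactor sketch (not part of the original pipeline). Every block above
# repeats the same fillna('') + length-check pattern; it could be written once.
# `flag_nonempty` is a name introduced here purely for illustration.
def flag_nonempty(frame, source_col):
    """Return 1 where the parsed field has content (len > 2), else 0."""
    return frame[source_col].fillna('').apply(lambda x: 1 if len(x) > 2 else 0)
# e.g. hot['HonorSociety'] = flag_nonempty(df, 'honor_societies')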
# ONE HOT ENCODING - EXPLODING COLUMNS
import yaml
with open('Resume-Parser-master-new/confs/config.yaml', 'r') as stream:
yaml_file = yaml.safe_load(stream)
#certifications
df.certifications.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['certifications']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.certifications.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#soft_skills
df.soft_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['soft_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.soft_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#major_minor
df.major_minor.fillna('', inplace=True)
for item in yaml_file['case_agnostic_education']['major_minor']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.major_minor.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#languages
df.languages.fillna('', inplace=True)
for item in yaml_file['case_agnostic_languages']['languages']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.languages.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#technical_skills
df.technical_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_skill']['technical_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.technical_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
job_dummies = hot
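# --- Hedged refactor sketch: the five YAML-driven loops above (and the matching
# loops in the resume section below) share one shape; a single helper could cover
# them. `explode_terms` is a name introduced here purely for illustration.
def explode_terms(frame, hot_frame, source_col, yaml_terms):
    """One-hot encode a free-text column against a list of YAML search terms."""
    frame[source_col] = frame[source_col].fillna('')
    for term in yaml_terms:
        if isinstance(term, list):
            search_term = term[0].replace('\\x20', '').replace(' ', '')
            col_name = term[1].replace('\\x20', '').replace(' ', '')
        else:
            search_term = term.replace('\\x20', '').replace(' ', '')
            col_name = search_term
        # bind search_term as a default argument so each column keeps its own term
        hot_frame[col_name] = frame[source_col].apply(
            lambda x, t=search_term: 1 if x.find(t) >= 0 else 0)
# e.g. explode_terms(df, hot, 'languages', yaml_file['case_agnostic_languages']['languages'])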
### for resumes ###
# drop unused columns
drop_cols = ['GPA', 'courses', 'hobbies', 'email', 'phone'
,'Education', 'Extracurriculars','Language', 'Work'
, 'Summaries', 'Skill', 'Member', 'Writing', 'Researching'
, 'Honor', 'Activity']
resume_features.drop(drop_cols, inplace=True, axis=1)
df = resume_features
#ONE HOT ENCODING
hot = df[['ReqID', 'CanID']].copy()  # copy to avoid pandas chained-assignment warnings
#honor_societies
df.honor_societies.fillna('', inplace=True)
hot['HonorSociety'] = df.honor_societies.apply(lambda x: 1 if len(x) > 2 else 0)
#latin_honors
df.latin_honors.fillna('', inplace=True)
hot['LatinHonors'] = df.latin_honors.apply(lambda x: 1 if len(x) > 2 else 0)
#scholarships_awards
df.scholarships_awards.fillna('', inplace=True)
hot['ScholarshipsAward'] = df.scholarships_awards.apply(lambda x: 1 if len(x) > 2 else 0)
#schools
df.community_college.fillna('', inplace=True)
hot['CommCollege'] = df.community_college.apply(lambda x: 1 if len(x) > 2 else 0)
df.other_universities.fillna('', inplace=True)
hot['OtherUni'] = df.other_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_100_universities.fillna('', inplace=True)
hot['Top100Uni'] = df.top_100_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_10_universities.fillna('', inplace=True)
hot['Top10Uni'] = df.top_10_universities.apply(lambda x: 1 if len(x) > 2 else 0)
#degrees
df.associate_education_level.fillna('', inplace=True)
hot['Associates'] = df.associate_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.bachelor_education_level.fillna('', inplace=True)
hot['Bachelors'] = df.bachelor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.master_education_level.fillna('', inplace=True)
hot['Masters'] = df.master_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.doctor_education_level.fillna('', inplace=True)
hot['Doctors'] = df.doctor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
#companies
df.company_foodbev.fillna('', inplace=True)
hot['FoodBev'] = df.company_foodbev.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_consumer.fillna('', inplace=True)
hot['Consumer'] = df.company_consumer.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_energychem.fillna('', inplace=True)
hot['EnergyChem'] = df.company_energychem.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_fin.fillna('', inplace=True)
hot['Fin'] = df.company_fin.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_health.fillna('', inplace=True)
hot['HealthMed'] = df.company_health.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_industrial.fillna('', inplace=True)
hot['Industrial'] = df.company_industrial.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_tech.fillna('', inplace=True)
hot['Tech'] = df.company_tech.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_services.fillna('', inplace=True)
hot['Services'] = df.company_services.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_other.fillna('', inplace=True)
hot['OtherCo'] = df.company_other.apply(lambda x: 1 if len(x) > 2 else 0)
#ONE HOT ENCODING - EXPLODING COLUMNS
with open('Resume-Parser-master-new/confs/config.yaml', 'r') as stream:
yaml_file = yaml.safe_load(stream)
#certifications
df.certifications.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['certifications']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.certifications.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#soft_skills
df.soft_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['soft_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.soft_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#major_minor
df.major_minor.fillna('', inplace=True)
for item in yaml_file['case_agnostic_education']['major_minor']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.major_minor.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#languages
df.languages.fillna('', inplace=True)
for item in yaml_file['case_agnostic_languages']['languages']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.languages.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#technical_skills
df.technical_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_skill']['technical_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.technical_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
empty_cols = []
for i in hot.columns[2:]:
if sum(hot[i]) == 0:
empty_cols.append(i)
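# Note: empty_cols now lists the dummy columns with no hits across these resumes;
# the script collects it but never drops them. If pruning were desired, one option
# (an assumption, not original behavior) would be:
#   hot.drop(columns=empty_cols, inplace=True)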
resume_dummies = hot
##### text embedding (CountVectorizer) #####
### helper to repeat the embedding process for each position
def GenerateCountEmbedding(req_id, job_text_df, resume_text_df):
    pos_jd_text = job_text_df[job_text_df["Req ID"] == req_id].copy()
    pos_resume_text = resume_text_df[resume_text_df["Req ID"] == req_id].copy()
pos_jd_text.rename(columns = {'Req ID':'ID',
'Job Description':'text'}, inplace=True)
pos_jd_text.ID = req_id
pos_jd_text = pos_jd_text[['ID', 'text']]
pos_resume_text.rename(columns = {'Candidate ID':'ID',
'Resume Text':'text'}, inplace=True)
pos_resume_text = pos_resume_text[['ID', 'text']]
#append to same df
df = pos_jd_text.append(pos_resume_text)
df.set_index('ID', inplace=True)
# join words and vectorize
tokenizer = RegexpTokenizer(r'\w+')
df['text'] = df['text'].apply(lambda x: tokenizer.tokenize(x))
df['text'] = df['text'].apply(lambda x: ' '.join(x))
count = CountVectorizer()
pos_embedding = count.fit_transform(df['text'])
pos_embedding = pd.DataFrame(pos_embedding.toarray())
pos_embedding.insert(loc=0, column="ID", value=df.index)
return pos_embedding
### for position e3625ad
pos1_embedding = GenerateCountEmbedding("e3625ad", job_text
, resume_text)
### for position "39ee3f"
pos2_embedding = GenerateCountEmbedding("39ee3f", job_text
, resume_text)
### for position "45de815"
pos3_embedding = GenerateCountEmbedding("45de815", job_text
, resume_text)
### for position "40a2c38"
pos4_embedding = GenerateCountEmbedding("40a2c38", job_text
, resume_text)
### for position "63146c6"
pos5_embedding = GenerateCountEmbedding("63146c6", job_text
, resume_text)
##### embeddings TFIDF #####
def GenerateTfidfEmbedding(req_id, job_text_df, resume_text_df):
    pos_jd_text = job_text_df[job_text_df["Req ID"] == req_id].copy()
    pos_resume_text = resume_text_df[resume_text_df["Req ID"] == req_id].copy()
pos_jd_text.rename(columns = {'Req ID':'ID',
'Job Description':'text'}, inplace=True)
pos_jd_text.ID = req_id
pos_jd_text = pos_jd_text[['ID', 'text']]
pos_resume_text.rename(columns = {'Candidate ID':'ID',
'Resume Text':'text'}, inplace=True)
pos_resume_text = pos_resume_text[['ID', 'text']]
#append to same df
df = pos_jd_text.append(pos_resume_text)
df.set_index('ID', inplace=True)
# join words and vectorize
tokenizer = RegexpTokenizer(r'\w+')
df['text'] = df['text'].apply(lambda x: tokenizer.tokenize(x))
df['text'] = df['text'].apply(lambda x: ' '.join(x))
tfidf = TfidfVectorizer()
tfidf_embedding = tfidf.fit_transform(df['text'])
tfidf_embedding = pd.DataFrame(tfidf_embedding.toarray())
tfidf_embedding.insert(loc=0, column="ID", value=df.index)
return tfidf_embedding
### for position "e3625ad"
pos1_tfidf = GenerateTfidfEmbedding("e3625ad", job_text
, resume_text)
### for position "39ee3f"
pos2_tfidf = GenerateTfidfEmbedding("39ee3f", job_text
, resume_text)
### for position "45de815"
pos3_tfidf = GenerateTfidfEmbedding("45de815", job_text
, resume_text)
### for position "40a2c38"
pos4_tfidf = GenerateTfidfEmbedding("40a2c38", job_text
, resume_text)
### for position "63146c6"
pos5_tfidf = GenerateTfidfEmbedding("63146c6", job_text
, resume_text)
##### combining embedding with dummies ########
# list(set(list(resume_dummies.columns))-set(list(job_dummies.columns)))
#rename their index column
resume_dummies.rename(columns = {'CanID':'ID'}, inplace=True)
resume_dummies.drop(["ReqID"], inplace=True, axis=1)
job_dummies.rename(columns = {'ReqID':'ID'}, inplace=True)
all_dummies = pd.concat([resume_dummies, job_dummies])
### Combine with Count embedding ###
pos1_full_count = pd.DataFrame(pos1_embedding).merge(all_dummies
, how="left"
, on="ID")
pos1_full_count.drop_duplicates(subset="ID", inplace=True)
pos1_full_count = pos1_full_count.fillna(value=0)
pos2_full_count = pd.DataFrame(pos2_embedding).merge(all_dummies
, how="left"
, on="ID")
pos2_full_count.drop_duplicates(subset="ID", inplace=True)
pos2_full_count = pos2_full_count.fillna(value=0)
pos3_full_count = pd.DataFrame(pos3_embedding).merge(all_dummies
, how="left"
, on="ID")
pos3_full_count.drop_duplicates(subset="ID", inplace=True)
pos3_full_count = pos3_full_count.fillna(value=0)
pos4_full_count = pd.DataFrame(pos4_embedding).merge(all_dummies
, how="left"
, on="ID")
pos4_full_count.drop_duplicates(subset="ID", inplace=True)
pos4_full_count = pos4_full_count.fillna(value=0)
pos5_full_count = pd.DataFrame(pos5_embedding).merge(all_dummies
, how="left"
, on="ID")
pos5_full_count.drop_duplicates(subset="ID", inplace=True)
pos5_full_count = pos5_full_count.fillna(value=0)
### Combine with TFIDF embedding ###
pos1_full_tfidf = pos1_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos1_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos1_full_tfidf = pos1_full_tfidf.fillna(value=0)
pos2_full_tfidf = pos2_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos2_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos2_full_tfidf = pos2_full_tfidf.fillna(value=0)
pos3_full_tfidf = pos3_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos3_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos3_full_tfidf = pos3_full_tfidf.fillna(value=0)
pos4_full_tfidf = pos4_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos4_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos4_full_tfidf = pos4_full_tfidf.fillna(value=0)
pos5_full_tfidf = pos5_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos5_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos5_full_tfidf = pos5_full_tfidf.fillna(value=0)
##### Run Cos Sim and rank the candidates #####
# define function returning the top recommended resume IDs for a job description
def RecommendTopTen(jobID, full_df):
recommended_candidates = []
indices = | pd.Series(full_df["ID"]) | pandas.Series |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
        # Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
        # Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
            # (suppresses tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
                # (suppresses tables.NaturalNameWarning)
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
        not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
            # nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
            def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
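# A minimal sketch of the data_columns pattern exercised above (illustrative
# only; hypothetical key name, assumes an open HDFStore like `store` above):
#
#   df = DataFrame({"A": np.random.randn(8), "B": np.random.randn(8)})
#   store.append("demo", df, data_columns=["B"])   # "B" becomes queryable on disk
#   store.select("demo", "B > 0")                  # the filter is evaluated by PyTables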
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a float with NaN not in the first position is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
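# A compact sketch of the chunked-read pattern covered above (illustrative
# only; hypothetical path and key names):
#
#   df.to_hdf(path, "df", format="table")
#   chunks = list(read_hdf(path, "df", chunksize=100))
#   result = concat(chunks)        # equivalent to reading the whole table at once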
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
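Examples
--------
Illustrative only (hypothetical frame):
>>> df = DataFrame({'A': ['a', 'a', 'b'], 'B': [1, 2, 3], 'C': [4, 5, 6]})
>>> df.groupby('A').agg(np.sum)
>>> df.groupby('A').agg({'B': np.sum, 'C': np.mean})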
"""
# special case to prevent duplicate plots when catching exceptions while
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
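# _groupby_function is a small factory used further below: it builds the
# reduction methods attached to GroupBy (sum, prod, min, max, first, last,
# _count). Each generated method first tries the fast cython aggregation under
# the given alias and falls back to a python-level
# aggregate(lambda x: npfunc(x, axis=self.axis)) when that path fails.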
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
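# The *_compat helpers below back the generated first/last/_count methods:
# _first_compat and _last_compat drop nulls and return the first/last remaining
# value (applied column-wise for DataFrames), while _count_compat returns the
# size of the group.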
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will group by the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to set up the selection
# as these are not passed directly but via the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
# we don't have this method, to indicate to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
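Examples
--------
Illustrative only (hypothetical frame):
>>> df = DataFrame({'A': ['a', 'a', 'b'], 'B': [1, 2, 3]})
>>> df.groupby('A').get_group('a')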
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
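Examples
--------
Illustrative only (hypothetical frame); the two calls mirror the aggregation
and transform cases described above:
>>> df = DataFrame({'A': ['a', 'a', 'b'], 'B': [1, 2, 3]})
>>> df.groupby('A')['B'].apply(lambda x: x.sum())       # f(chunk) -> scalar
>>> df.groupby('A')['B'].apply(lambda x: x - x.mean())  # f(chunk) -> same-indexed chunk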
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
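Examples
--------
Illustrative only:
>>> df = DataFrame({'A': ['a', 'a', 'b']})
>>> df.groupby('A').size()
A
a    2
b    1
dtype: int64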
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute open, high, low and close values per group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will take the nth non-null row; dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
1 1 4
2 5 6
>>> df.groupby('A').tail(1)
A B
1 1 4
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
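# Illustrative sketch (hypothetical group layout) of the scatter performed above:
# per-group positions taken from `arr` are written back into original row order.
# >>> arr = np.arange(3, dtype='int64')        # largest group has 3 rows
# >>> cumcounts = np.zeros(5, dtype=arr.dtype)
# >>> cumcounts[[0, 2, 3]] = arr[:3]           # group one occupies rows 0, 2, 3
# >>> cumcounts[[1, 4]] = arr[:2]              # group two occupies rows 1, 4
# >>> cumcounts
# array([0, 0, 1, 2, 1])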
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
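# Hedged usage sketch for the dispatcher above -- the returned class depends only
# on the type of `obj` (the objects below are hypothetical):
# >>> s = Series([1, 2, 3], index=['a', 'a', 'b'])
# >>> type(groupby(s, s.index)).__name__
# 'SeriesGroupBy'
# >>> df = DataFrame({'A': [1, 1, 2], 'B': [4, 5, 6]})
# >>> type(groupby(df, 'A')).__name__
# 'DataFrameGroupBy'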
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except Exception:
# unable to use the fast apply; fall through and take the slow path below
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this "
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
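# Worked example for generate_bins_generic (a sketch; the arrays are chosen to
# satisfy the sorted/end-point requirements documented above):
# >>> values = np.array([1, 2, 3, 4, 5, 6])
# >>> binner = np.array([0, 3, 6, 9])
# >>> generate_bins_generic(values, binner, closed='left')
# array([2, 5, 6])
# i.e. the populated bins are values[0:2], values[2:5] and values[5:6].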
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
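# Hedged sketch of BinGrouper's bookkeeping (internal API; the inputs below are
# hypothetical): with bin edges [2, 5, 6] and labels ['x', 'y', 'z'], the
# `indices` property maps each label to the positional rows of its bin.
# >>> bg = BinGrouper(np.array([2, 5, 6]), Index(['x', 'y', 'z']))
# >>> dict(bg.indices)
# {'x': [0, 1], 'y': [2, 3, 4], 'z': [5]}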
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed-in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamp-like values
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed-in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
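# Hedged sketch of the conversions performed by _convert_grouper (hypothetical
# inputs): a dict becomes its .get method, and a Series aligned to the axis is
# reduced to its values.
# >>> axis = Index(['a', 'b', 'c'])
# >>> _convert_grouper(axis, {'a': 1, 'b': 2})('a')
# 1
# >>> _convert_grouper(axis, Series([1, 2, 3], index=axis))
# array([1, 2, 3])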
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
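# Sketch (hypothetical values) of the repeat step used above: per-group
# aggregates are expanded back to one value per original row via np.repeat.
# >>> np.repeat(np.array([10.0, 20.0]), [3, 2])
# array([10., 10., 10., 20., 20.])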
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as it's faster than concat
# and if we have mi-columns
if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths; fall
# through to the outer else clause
return Series(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
return Series(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
concatenated.sort_index(inplace=True)
return concatenated
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# try to do a fast transform via merge if possible
try:
obj = self._obj_with_exclusions
if isinstance(func, compat.string_types):
result = getattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = getattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a grouped result that doesn't preserve the index; remap the index based on the grouper
# and broadcast it
if ((not isinstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
len(result.index) != len(obj.index)):
results = obj.values.copy()
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
return DataFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can merge the result in
# GH 7383
names = result.columns
result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = | notnull(res_r) | pandas.core.common.notnull |
#importing libraries
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
#loading dataset
dataset = pd.read_csv('Crimes_2001_to_2022.csv',low_memory=False)
dataset.head(10)
# dataset.dtypes
# dropping the features that are not useful
dataset=dataset.drop(columns=['ID','Case Number','Description','Updated On','Block'])
#for checking the shape of dataset
# X.shape
print('Columns in dataset: ', dataset.columns)
#Checking the shape of dataset
print('Displaying the shape of dataset',dataset.shape)
# dropping the null-value entries
dataset.dropna(inplace=True)
# Displaying DataSet
print('Displaying DataSet after removing null values',dataset)
# Before removing Null values 1048575
# After removing Null value 1015247
# Total Null values removed 33328
# ignore latitude and longitude values outside of Chicago
dataset=dataset[(dataset["Latitude"] < 45)
& (dataset["Latitude"] > 40)
& (dataset["Longitude"] < -85)
& (dataset["Longitude"] > -90)]
# Displaying DataSet
print('Displaying DataSet',dataset)
# listing the crimes category wise with their counts
types=dataset['Primary Type'].value_counts().sort_values(ascending=False)
# Displaying types
print('Displaying types',types)
# crime types according to their counts in dataframe
# 15 classes
# major_crimes=['THEFT','BATTERY','CRIMINAL DAMAGE','ASSAULT','OTHER OFFENSE','DECEPTIVE PRACTICE','NARCOTICS',
# 'BURGLARY','MOTOR VEHICLE THEFT','ROBBERY','CRIMINAL TRESPASS','WEAPONS VIOLATION','OFFENSE INVOLVING CHILDREN',
# 'PUBLIC PEACE VIOLATION','CRIM SEXUAL ASSAULT']
# 8 classes
# storing major crime types according to their counts in dataframe
# major_crimes=['THEFT','BATTERY','CRIMINAL DAMAGE','ASSAULT','OTHER OFFENSE','DECEPTIVE PRACTICE','NARCOTICS','BURGLARY']
# major crime time
#---> Storing Major Crimes
major_crimes=['THEFT','BATTERY','CRIMINAL DAMAGE','ASSAULT']
# Displaying major_crimes
crime_df = dataset.loc[dataset['Primary Type'] .isin(major_crimes)]
print('Displaying major_crimes',crime_df)
# since we don't have the different crime types in the early years, we drop the data for those years
data = crime_df.pivot_table(index='Year', columns='Primary Type', aggfunc='count')
print(data)
# selecting the dataset which starts from 2015
crime_df=crime_df[crime_df['Year']>=2015]
# Displaying major_crimes from 2015
print('Displaying major_crimes from 2015',crime_df)
temp=crime_df.copy()
temp
# splitting the data into thirds for random sample selection
nrows= temp.shape[0]
portion=math.floor(nrows/3)
# Displaying this portion size
print('Displaying this portion size',portion)
# First third of the data
first=temp.iloc[0:portion,:]
# Displaying the shape of the first third
print('Displaying the shape of the first third',first.shape)
# Second third of the data
nextp=portion+portion+1
scnd=temp.iloc[(portion+1):nextp,:]
# Displaying the shape of the second third
print('Displaying the shape of the second third',scnd.shape)
# Final third of the data
finalp=nextp+portion+1
third=temp.iloc[(nextp+1):finalp,:]
# Displaying the shape of the final third
print('Displaying the shape of the final third',third.shape)
# picking 80k random entries from the first third
index=np.random.choice(portion,replace=False,size = 80000)
df_frst=first.iloc[index]
# displaying the first patch shape
print('Displaying the first patch shape',df_frst.shape)
# Drawing the boxplot to check outlying values
sns.set_theme(style="whitegrid")
ax = sns.boxplot(x=df_frst["Ward"])
# picking 80k random entries from the second third
index=np.random.choice(portion,replace=False,size = 80000)
df_scnd=scnd.iloc[index]
# displaying the second patch
print('Displaying the second patch',df_scnd)
# picking 80k random entries from the final third
index=np.random.choice(portion,replace=False,size = 80000)
df_third=third.iloc[index]
# displaying the third patch
print('Displaying the third patch',df_third)
# combined all three dataframe
temp_df = | pd.concat([df_frst,df_scnd],ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import io
import urllib.request
import requests
import camelot
from beis_indicators import project_dir
def download_data():
travel_to_work_2016 = 'https://www.ons.gov.uk/file?uri=/employmentandlabourmarket/peopleinwork/employmentandemployeetypes/adhocs/007252averagehometoworktraveltimeages16andoverocttodec2016/2017update.xls'
travel_to_work_2017 = 'https://www.ons.gov.uk/file?uri=/employmentandlabourmarket/peopleinwork/employmentandemployeetypes/adhocs/006022averagehometoworktraveltimeages16plusocttodec2015/2018updateod17data.xls'
travel_to_work_2018 = 'https://www.ons.gov.uk/file?uri=/employmentandlabourmarket/peopleinwork/employmentandemployeetypes/adhocs/010202averagehometoworktraveltimeage16yearsandoverukoctobertodecember2018/averagehometoworktraveltimes16plusod18.xls'
resp_16 = requests.get(travel_to_work_2016)
resp_17 = requests.get(travel_to_work_2017)
resp_18 = requests.get(travel_to_work_2018)
output_16 = open(f'{project_dir}/data/raw/travel/travel_to_work_2016.xls', 'wb')
output_17 = open(f'{project_dir}/data/raw/travel/travel_to_work_2017.xls', 'wb')
output_18 = open(f'{project_dir}/data/raw/travel/travel_to_work_2018.xls', 'wb')
output_16.write(resp_16.content)
output_17.write(resp_17.content)
output_18.write(resp_18.content)
output_16.close()
output_17.close()
output_18.close()
def load_updated_codes():
file = "https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/migrationwithintheuk/methodologies/interalmigrationmethodology/internalmigrationmethodology2016.pdf"
tables = camelot.read_pdf(file, pages = "15-end")
changes_1 = pd.concat([tables[0].df,tables[1].df]).iloc[1:]
changes_1[0] = changes_1[0].apply(lambda x: x.replace('\n', ''))
changes_1[1] = changes_1[1].apply(lambda x: x.replace('\n', ''))
changes_1[2] = changes_1[2].apply(lambda x: x.replace('\n', ''))
changes_1[3] = changes_1[3].apply(lambda x: x.replace('\n', ''))
changes_2 = pd.concat([tables[2].df,tables[3].df]).drop([0,0])
changes_2[0] = changes_2[0].apply(lambda x: x.replace('\n', ''))
changes_2[1] = changes_2[1].apply(lambda x: x.replace('\n', ''))
changes_2[2] = changes_2[2].apply(lambda x: x.replace('\n', ''))
convert_dict = dict(zip(changes_1[2], changes_1[3]))
convert_dict.update(dict(zip(changes_2[1], changes_2[2])))
return convert_dict
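# Note: dict.update mutates in place and returns None, which is why the merged
# mapping is returned explicitly above; e.g.
# >>> d = {'a': 1}
# >>> d.update({'b': 2}) is None
# True
# >>> d
# {'a': 1, 'b': 2}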
def get_travel_work_data():
download_data()
xl_16 = pd.ExcelFile(f'{project_dir}/data/raw/travel/travel_to_work_2016.xls')
xl_17 = pd.ExcelFile(f'{project_dir}/data/raw/travel/travel_to_work_2017.xls')
xl_18 = pd.ExcelFile(f'{project_dir}/data/raw/travel/travel_to_work_2018.xls')
df_16 = xl_16.parse('OD16').drop('Office For National Statistics', axis=1)
df_17 = xl_17.parse('OD17').drop('Office For National Statistics', axis=1)
df_18 = xl_18.parse('OD18').drop('Office For National Statistics', axis=1)
df_16.columns = ['UALADGB UA / LAD of residence', 'Mean']
df_17.columns = ['UALADGB UA / LAD of residence', 'Mean']
df_18.columns = ['UALADGB UA / LAD of residence', 'Mean']
df_16 = df_16[9:419].reset_index(drop=True)
df_17 = df_17[9:419].reset_index(drop=True)
df_18 = df_18[9:419].reset_index(drop=True)
# print(df_16.head())
df_16['Code'] = df_16['UALADGB UA / LAD of residence'].apply(lambda x: x.strip().split(" ",1)[0])
df_16['LAD'] = df_16['UALADGB UA / LAD of residence'].apply(lambda x: x.strip().split(" ",1)[1])
df_17['Code'] = df_17['UALADGB UA / LAD of residence'].apply(lambda x: x.strip().split(" ",1)[0])
df_17['LAD'] = df_17['UALADGB UA / LAD of residence'].apply(lambda x: x.strip().split(" ",1)[1])
df_18['Code'] = df_18['UALADGB UA / LAD of residence'].apply(lambda x: x.strip().split(" ",1)[0])
df_18['LAD'] = df_18['UALADGB UA / LAD of residence'].apply(lambda x: x.strip().split(" ",1)[1])
del df_16['UALADGB UA / LAD of residence']
del df_17['UALADGB UA / LAD of residence']
del df_18['UALADGB UA / LAD of residence']
df_16['Code'] = df_16['Code'].apply(lambda x: ('00'+ x) if len(x) <= 2 else x)
df_17['Code'] = df_17['Code'].apply(lambda x: ('00'+ x) if len(x) <= 2 else x)
df_18['Code'] = df_18['Code'].apply(lambda x: ('00'+ x) if len(x) <= 2 else x)
equivs = pd.read_csv(f'{project_dir}/data/aux/equivalents_regions.csv',encoding='cp1252')
from helper import *
import pandas as pd
import os
import glob
import re
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")
def test_feat(cond, df, cols, p, df_u):
unseen = ''
if cond =='unseen':
unseen = 'unseen'
# cols is the feature combination to test
# p selects the target: 1 = loss, 2 = latency
#print(df.columns)
X = df[cols]
X2 = df_u[cols]
if p == 1:
y = df.loss
y2 = df_u.loss
if p == 2:
y = df.latency
y2 = df_u.latency
# randomly split into train and test sets, test set is 20% of data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.2, random_state=1)
if unseen == 'unseen':
X_test = X2
y_test = y2
clf = DecisionTreeRegressor()
clf = clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
acc1 = mean_squared_error(y_test, y_pred)
clf2 = RandomForestRegressor(n_estimators=10)
clf2 = clf2.fit(X_train,y_train)
y_pred2 = clf2.predict(X_test)
acc2= mean_squared_error(y_test, y_pred2)
#print("Random Forest Accuracy:", acc2, '\n')
clf3 = ExtraTreesRegressor(n_estimators=10)
clf3 = clf3.fit(X_train,y_train)
y_pred3 = clf3.predict(X_test)
acc3= mean_squared_error(y_test, y_pred3)
#print("Extra Trees Accuracy:", acc3, '\n')
pca = PCA()
X_transformed = pca.fit_transform(X_train)
cl = DecisionTreeRegressor()
cl.fit(X_transformed, y_train)
newdata_transformed = pca.transform(X_test)
y_pred4 = cl.predict(newdata_transformed)
acc4 = mean_squared_error(y_test, y_pred4)
#print("PCA Accuracy:", acc4, '\n')
return [acc1, acc2, acc3, acc4 ]
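# Hedged usage sketch (assumes dataframes `df` and `df_u` that contain the listed
# feature columns plus `loss` and `latency`):
#   cols = ["max_bytes", "longest_seq", "total_bytes"]
#   mse_dt, mse_rf, mse_et, mse_pca = test_feat('seen', df, cols, p=1, df_u=df_u)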
def getAllCombinations( cond_):
lst = ['total_bytes','max_bytes','proto', "1->2Bytes",'2->1Bytes'
,'1->2Pkts','2->1Pkts','total_pkts','number_ms', 'pkt_ratio','time_spread', 'pkt sum','longest_seq'
,'total_pkt_sizes']
lst1 = ["max_bytes", "longest_seq", "total_bytes"]
lst2 = ["total_pkts", "number_ms", "byte_ratio"]
if cond_ == 1:
lst = lst1
if cond_ == 2:
lst = lst2
uniq_objs = set(lst)
combinations = []
for obj in uniq_objs:
for i in range(0,len(combinations)):
combinations.append(combinations[i].union([obj]))
combinations.append(set([obj]))
print("all combinations generated")
return combinations
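# Quick sanity check of the power-set construction above (all non-empty subsets):
# for the 3-feature list used when cond_ == 1, 2**3 - 1 = 7 combinations, e.g.
#   assert len(getAllCombinations(1)) == 7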
def test_mse(cond, all_comb1, all_comb2):
unseen = ''
if cond =='unseen':
unseen = 'unseen'
filedir_unseen = os.path.join(os.getcwd(), "outputs", unseen + "combined_t_latency.csv")
df_unseen = pd.read_csv(filedir_unseen)
"""Utilities for solving geodesic equation
"""
import itertools
import typing
from collections import namedtuple
import numpy
import pandas
import sympy
from scipy import integrate
from pystein import metric, curvature, utilities
class Solution:
def __init__(self, soln: typing.List[sympy.Eq], vec_funcs: typing.List[sympy.Function], param: sympy.Symbol, curve: typing.List[sympy.Expr],
g: metric.Metric, eqns: typing.List[sympy.Eq]):
self.soln = soln
self._vec_funcs = vec_funcs
self._param = param
self._curve = curve
self._metric = g
self.eqns = eqns
def vec(self, val: sympy.Expr):
cs = self._metric.coord_system.base_symbols()
param_sub = {self._param: val}
subs = [(c, func.subs(param_sub)) for c, func in zip(cs, self._curve)]
subs += [(self._param, val)]
return [self.soln[n].args[1].subs(subs) for n in range(len(self.soln))]
def path_coord_func(coord, param):
return sympy.Function(coord.name)(param)
def vec_coord_func(coord, param):
return sympy.Function('v^{{{}}}'.format(coord.name))(param)
def parallel_transport_equation(mu: int, curve: typing.List[sympy.Function], param: sympy.Symbol, g: metric.Metric):
base_symbols = g.coord_system.base_symbols()
curve_subs = dict(zip(base_symbols, curve))
N = len(base_symbols)
vec_coord_func_map = {s: vec_coord_func(s, param) for s in base_symbols}
v_mu = vec_coord_func_map[base_symbols[mu]]
lhs = sympy.diff(v_mu, param)
for sig in range(N):
x_sig = curve[sig]
dx_sig_d_param = sympy.diff(x_sig, param)
for rho in range(N):
v_rho = vec_coord_func_map[base_symbols[rho]]
c_sig_rho = curvature.christoffel_symbol_component(mu, sig, rho, metric=g)
c_sig_rho = c_sig_rho.doit().subs(curve_subs)
lhs += c_sig_rho * dx_sig_d_param * v_rho
return lhs
def parallel_transport_soln(param: sympy.Symbol, curve: typing.List[sympy.Expr], g: metric.Metric):
bs = g.coord_system.base_symbols()
# Vector
v0 = sympy.Function('v^{{{}}}'.format(bs[0].name))
v1 = sympy.Function('v^{{{}}}'.format(bs[1].name))
lhs_0 = utilities.full_simplify(parallel_transport_equation(0, curve, param, g).doit())
lhs_1 = utilities.full_simplify(parallel_transport_equation(1, curve, param, g).doit())
eqns = [
sympy.Eq(lhs_0, 0),
sympy.Eq(lhs_1, 0),
]
funcs = [v0(param), v1(param)]
# Initial Conditions
ics = {v0(0): v0(0), v1(0): v1(0)}
soln = sympy.dsolve(eqns, funcs, ics=ics)
return Solution(soln, [v0, v1], param, curve, g, eqns)
def geodesic_equation(mu: int, param, metric: metric.Metric):
base_symbols = metric.coord_system.base_symbols()
coord_func_map = {s: path_coord_func(s, param) for s in base_symbols}
x_mu = coord_func_map[base_symbols[mu]]
lhs = sympy.diff(sympy.diff(x_mu, param))
for rho, sig in itertools.product(range(len(base_symbols)), range(len(base_symbols))):
x_rho = coord_func_map[base_symbols[rho]]
x_sig = coord_func_map[base_symbols[sig]]
c = curvature.christoffel_symbol_component(mu, rho, sig, metric=metric).subs(coord_func_map)
lhs += c * sympy.diff(x_rho, param) * sympy.diff(x_sig, param)
return lhs
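# Note: geodesic_equation above assembles the left-hand side of the geodesic
# equation d^2 x^mu / d lambda^2 + Gamma^mu_{rho sigma} (dx^rho/d lambda)(dx^sigma/d lambda) = 0
# as a sympy expression; callers set it equal to zero (see numerical_geodesic below).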
def numerical_geodesic(g: metric.Metric, init, ts):
coords = g.coord_system.base_symbols()
N = len(coords)
param = sympy.symbols('lambda')
lhss = [utilities.full_simplify(geodesic_equation(mu, param, metric=g)) for mu in range(N)]
funcs = [sympy.Function(c.name)(param) for c in coords]
sub_map = [(sympy.diff(sympy.diff(func, param), param), sympy.symbols('{}2'.format(func.name))) for func in funcs] + \
[(sympy.diff(func, param), sympy.symbols('{}1'.format(func.name))) for func in funcs] + \
[(func, sympy.symbols('{}0'.format(func.name))) for func in funcs]
coord2_eqns = [sympy.solve(lhs.subs(sub_map), sympy.symbols('{}2'.format(func.name)))[0] for lhs, func in zip(lhss, funcs)]
state_symbols = list(sympy.symbols(['{}0'.format(c.name) for c in coords])) + list(sympy.symbols(['{}1'.format(c.name) for c in coords]))
dcoord1s = [sympy.lambdify(state_symbols, eqn) for eqn in coord2_eqns]
def integrand(state, param):
return [s for s in state[N:]] + [s(*state) for s in dcoord1s]
res = integrate.odeint(integrand, init, ts)
df = pandas.DataFrame(res[:, :N], columns=[c.name for c in coords])
return df
def numerical_sampler(g: metric.Metric, ls: numpy.ndarray, init_point: tuple, tangent_scale: float = 1, num_angles: int = 12):
dfs = []
for theta_0 in numpy.arange(0.0, 2 * numpy.pi, numpy.pi / num_angles):
_df = numerical_geodesic(g, tuple(list(init_point) + [tangent_scale * numpy.cos(theta_0), tangent_scale * numpy.sin(theta_0)]), ls)
_df = _df.assign(theta_0=theta_0)
dfs.append(_df)
return pandas.concat(dfs, axis=0)
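# Hedged usage sketch (assumes a 2D Metric `g` constructed elsewhere with pystein,
# so the initial state is (x0, y0, dx0/dlambda, dy0/dlambda)):
#   ts = numpy.linspace(0.0, 1.0, 101)
#   path_df = numerical_geodesic(g, (0.0, 0.0, 1.0, 0.0), ts)
#   fan_df = numerical_sampler(g, ts, init_point=(0.0, 0.0), num_angles=8)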
import pandas as pd
import numpy as np
import pdb
import sys
import os
#######################################
# creates validation table in CSV format
#
# this script assumes download of lake_surface_temp_preds.csv from
# the data release (https://www.sciencebase.gov/catalog/item/60341c3ed34eb12031172aa6)
#
##########################################################
#load data
df = pd.read_csv("lake_surface_temp_preds.csv")
vals = df['wtemp_ERA5'].values[:]
df['wtemp_ERA5'] = vals[:]+3.47
df['wtemp_ERA5b'] = vals[:]
pdb.set_trace()
site_ids = np.unique(df['site_id'].values)
meta = pd.read_csv("../../metadata/lake_metadata.csv")
meta = meta[meta['num_obs']>0]
#calculate error per site
# err_per_site_ea = [np.abs((df[df['site_id']==i_d]['wtemp_EALSTM']-df[df['site_id']==i_d]['wtemp_obs'])).mean()for i_d in site_ids]
err_per_site_ea = [np.sqrt(((df[df['site_id']==i_d]['wtemp_EALSTM']-df[df['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in site_ids]
err_per_site_LM = [np.sqrt(np.nanmean((df[df['site_id']==i_d]['wtemp_LM']-df[df['site_id']==i_d]['wtemp_obs'])**2)) for i_d in site_ids]
err_per_site_e5 = [np.sqrt(((df[df['site_id']==i_d]['wtemp_ERA5']-df[df['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in site_ids]
err_per_site_e5b = [np.sqrt(((df[df['site_id']==i_d]['wtemp_ERA5b']-3.46-df[df['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in site_ids]
err_per_site = pd.DataFrame()
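# A plausible next step (assumption -- the script is truncated here): collect the
# per-site RMSE lists computed above into the empty frame, e.g.
#   err_per_site['site_id'] = site_ids
#   err_per_site['rmse_EALSTM'] = err_per_site_ea
#   err_per_site['rmse_LM'] = err_per_site_LM
#   err_per_site['rmse_ERA5'] = err_per_site_e5
#   err_per_site['rmse_ERA5b'] = err_per_site_e5b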
import traceback
from pathlib import Path
from operator import itemgetter
import shlex
import os
import sys
import time
import cv2
import numpy as np
import subprocess
import requests
import json
import pydicom
from pydicom.dataset import Dataset
import pytesseract
from PIL import Image, ImageDraw, ImageFont
import pandas as pd
import socket
from multiprocessing import Process, Queue
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import exists
from datetime import datetime
from datetime import timedelta, date
from kombu import Connection, Exchange, Queue, Producer
from kombu.pools import connections
import uuid
import threading
import math
import slidingwindow as sw
import mxnet as mx
import gluoncv as gcv
from mxnet import gluon, nd, image, autograd
from mxnet.gluon.data.vision import transforms
from gluoncv import utils
from gluoncv.model_zoo import get_model
from gluoncv.utils import viz
from matplotlib import pyplot as plt
import redis
NODE_LIST = []
NODE_LIST.append('localhost')
### uncomment and edit following parts if clustering is needed
#NODE_LIST.append('172.16.31.10')
#NODE_LIST.append('172.16.58.3')
#NODE_LIST.append('172.16.31.10')
HOST_IP_ADDRESS = '172.16.31.10'
WEB_PORT = 5002
TEXT_MODEL = './frozen_east_text_detection.pb'
ANATOMY_MODEL = './anatomy_classification.params'
MARKER_MODEL = './marker_detection.params'
DICOM_DIR = '/app_cache/dicom'
SMTP_RELAY_SERVER = 'YOUR_SMTP_RELAY_SERVER_IP'
EMAIL_RECIPIENT_LIST = ['<EMAIL>', '<EMAIL>']
# redis_client_list = []
# local_redis = None
# for item in NODE_LIST:
# redisClient = redis.StrictRedis(host=item, port=6379, db=0)
# redis_client_list.append(redisClient)
# if item == 'localhost':
# local_redis = redisClient
Base = declarative_base()
class False_Annotation(Base):
__tablename__ = 'false_annotation'
Id = Column(Integer, autoincrement=True, primary_key=True)
Record_Date_Time = Column(DateTime, nullable=False,default=datetime.now)
Msg_ID = Column(String)
Accession_Number = Column(String)
Patient_Name = Column(String)
Exam_Room = Column(String)
SOP_Instance_UID = Column(String)
Problem_Detail = Column(String)
User_Reply = Column(String)
class Received_Image(Base):
__tablename__ = "received_image"
Id = Column(Integer, primary_key=True)
Record_Date_Time = Column(DateTime, nullable=False,default=datetime.now)
Station_Name = Column(String)
Accession_Number = Column(String)
SOP_Instance_UID = Column(String)
Study_Description = Column(String)
Flip = Column(String)
Anatomy_Detected = Column(String)
Digital_Marker_Used = Column(String)
Physical_Marker_Used = Column(String)
Remarks = Column(String)
class Program_Error(Base):
__tablename__ = 'program_error'
Id = Column(Integer, autoincrement=True, primary_key=True)
Record_Date_Time = Column(DateTime, nullable=False,default=datetime.now)
Error_Detail = Column(String)
class Order_Detail(Base):
__tablename__ = 'order_detail'
Id = Column(Integer, autoincrement=True, primary_key=True)
Record_Date_Time = Column(DateTime, nullable=False,default=datetime.now)
Accession_Number = Column(String)
Region = Column(String)
Laterality = Column(String)
Radiologist_Name = Column(String)
Urgency = Column(String)
Order_Control = Column(String)
Order_Number = Column(String)
Order_Status = Column(String)
Exam_Room = Column(String)
engine = create_engine('postgresql://rad:rad@localhost:5432/rad_db')
#engine = create_engine('sqlite:////home/rad/app/DR_Warnings/server/web_server/warning_web_server/web_server.db')
Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
session = Session()
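# Minimal sketch of how these ORM models pair with the session above
# (illustrative values only, not part of the original workflow):
#   err = Program_Error(Error_Detail='example traceback text')
#   session.add(err)
#   session.commit()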
class DicomImageAnalyzer:
def __init__(self):
self.img = None
text_detection_model = TEXT_MODEL
anatomy_classification_model = ANATOMY_MODEL
marker_detection_model = MARKER_MODEL
dir_path = Path(__file__).parent.absolute()
self.UpperExtrimityList = ['HAND','WRIST','FOREARM','ELBOW','HUMERUS','SHOULDER','FINGER']
self.LowerExtrimityList = ['FOOT','ANKLE','LEG','FEMUR','TOES','KNEE','HIP','CALCANEUM']
self.TrunkList = ['CHEST', 'ABDOMEN']
template_folder = str(dir_path) + '/marker_templates/'
list_marker = []
for subdir, dirs, files in os.walk(template_folder):
for f in files:
manufacturer_name = subdir.replace(template_folder, '')
marker_name = os.path.splitext(f)[0]
template_img = cv2.imread(os.path.join(subdir,f), cv2.IMREAD_GRAYSCALE)
temp_h, temp_w = template_img.shape[:2]
dict_template = {'manufacturer_name':manufacturer_name,'name': manufacturer_name + '_' + marker_name, 'width': temp_w, 'image': template_img}
list_marker.append(dict_template)
self.marker = sorted(list_marker, key=itemgetter('width'), reverse=True)
print('Starting DicomImageAnalyzer')
# Load text detection network with opencv
self.text_detection_net = cv2.dnn.readNet(text_detection_model)
# load anatomy classification network with gluoncv
anatomy_classes_df = pd.read_csv('anatomy_classes.csv')
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on Thu Aug 30 15:30:28 2018
@author:
"""
import os
import codecs
import datetime
try:
import pandas as pd
except:
pass
from .. import mappinglib as mapping
class CodeList(dict):
def __init__(self, file_path=False):
if not file_path:
file_path = os.path.dirname(os.path.abspath(__file__)) + '/ices_codelist.txt'
self.file_path = file_path
self.df = pd.read_csv(file_path, sep='\t', encoding='cp1252', dtype=str)
import sys
import os
import datetime
import time
import math
from functions import *
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtWidgets import QInputDialog, QLineEdit, QFileDialog, QGridLayout
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSignal
#from PyQt5 import QtCore, QtGui, QtWidgets, uic
#from PyQt5.QtGui import QFileDialog
import pandas
import pandasql
import sqlite3
from sqlite3 import Error
import sqlparse
import math
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import ( NavigationToolbar2QT as NavigationToolbar )
class Window(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.dlg = None
self.init_ui()
self.dataBase = None
self.dataBaseRef = None
self.dataBaseTar = None
self.dynamicDataBase = None
self.dynamicTargetDatabase = None
self.dynamicReferenceDatabase = None
self.preview = None
self.recommendations = None
self.dimensions = None
self.measures = None
self.conn1 = None
self.conn2 = None
self.conn3 = None
self.dynaConn = None
self.dynaConnTarget = None
self.dynaConnReference = None
self.cur1 = None
self.cur2 = None
self.cur3 = None
self.dynaCursor = None
self.dynaConnTargetCursor = None
self.dynaConnReferenceCursor = None
self.allViews = None
self.toolBar = None
self.fileName = None
self.query = None
self.houseId = None
self.threadClass1 = None
self.threadRunning = False
self.targetDatabaseCondition = None
self.currentVisualIndex = 0
self.maxIndex = None
self.runAtLeastOnce = False
self.dlg.myConsole.setFontPointSize(8)
self.dlg.myConsole.setText(self.dlg.myConsole.toPlainText() + "\n")
def init_ui(self):
"""Import the QtDesigner file and assign functions to each of the UI's buttons.
"""
self.dlg = uic.loadUi("interface.ui")
self.dlg.show()
self.dlg.browseForFileButton.clicked.connect(self.browse_for_file)
self.dlg.importDataButton.clicked.connect(self.import_file)
self.dlg.previewDataButton.clicked.connect(self.preview_file)
self.dlg.assignAsDimensionButton.clicked.connect(self.assign_as_dimension)
self.dlg.assignAsMeasureButton.clicked.connect(self.assign_as_measure)
self.dlg.removeDimensionAssignmentButton.clicked.connect(self.remove_dimension_assignment)
self.dlg.removeMeasureAssignmentButton.clicked.connect(self.remove_measure_assignment)
self.dlg.executeRefQueryButton.clicked.connect(self.execute_ref_query)
self.dlg.previewRefQueryButton.clicked.connect(self.preview_target_query)
self.dlg.previewReferenceDatabaseButton.clicked.connect(self.preview_reference_query)
self.dlg.generateVisualizationsButton.clicked.connect(self.outputToPane)
self.dlg.connectToHouseButton.clicked.connect(self.connectToHouse)
self.dlg.closeDynamicConnectionButton.clicked.connect(self.closeDynamicConnection)
self.dlg.previousVisualButton.clicked.connect(self.previousVisual)
self.dlg.nextVisualButton.clicked.connect(self.nextVisual)
self.dlg.previewDynamicDataButton.clicked.connect(self.preview_dynamic_data)
def browse_for_file(self):
"""Open a windows File-Dialog to allow the user to simultaneously define the filepath and the filename
"""
if self.threadRunning :
self.addLineToConsole("Please close the dynamic connection before attempting to analyse static data")
return
fileName = QtWidgets.QFileDialog.getOpenFileName(self, 'OpenFile')
self.dlg.fileLocation.setText(fileName[0])
self.fileName = os.path.basename(self.dlg.fileLocation.text())
def create_connection(self, db_file):
"""Set objects for conn1 (Connection 1) and cur1 (Connection 1's cursor)
Args:
db_file: the data file being connected to.
"""
try:
self.conn1 = sqlite3.connect(db_file) #Set Connection 1
self.cur1 = self.conn1.cursor() #Set Cursor 1
except Error as e:
print(e)
def create_connection2(self, db_file):
"""Set objects for conn2 (Connection 2) and cur2 (Connection 2's cursor)
Two connections are needed because two subsets of the data file are being compared.
Args:
db_file: the data file being connected to.
"""
try:
self.conn2 = sqlite3.connect(db_file) #Set Connection 2
self.cur2 = self.conn2.cursor() #Set Cursor 2
except Error as e:
print(e)
def create_connection3(self, db_file):
"""Set objects for conn2 (Connection 2) and cur2 (Connection 2's cursor)
Two connections are needed because two subsets of the data file are being compared.
Args:
db_file: the data file being connected to.
"""
try:
self.conn3 = sqlite3.connect(db_file) #Set Connection 2
self.cur3 = self.conn3.cursor() #Set Cursor 2
except Error as e:
print(e)
def create_dynamic_connection(self, db_file):
"""Set objects for dynaConn (dynamic connection) and dynaCursor (Dynamic connection's cursor)
This set creates a connection to the current dynamic data source.
Args:
db_file: the dynamic data file being connected to.
"""
try:
self.dynaConn = sqlite3.connect(db_file) #Set Dynamic Connection
self.dynaCursor = self.dynaConn.cursor() #Set Dynamic Cursor
except Error as e:
print(e) #TODO print errors out to a make shift terminal
def create_dynamic_target_connection(self, db_file):
try:
self.dynaConnTarget = sqlite3.connect(db_file) #Set Dynamic Connection
self.dynaConnTargetCursor = self.dynaConnTarget.cursor() #Set Dynamic Cursor
except Error as e:
print(e) #TODO print errors out to a make shift terminal
def create_dynamic_reference_connection(self, db_file):
try:
self.dynaConnReference = sqlite3.connect(db_file) #Set Dynamic Connection
self.dynaConnReferenceCursor = self.dynaConnReference.cursor() #Set Dynamic Cursor
except Error as e:
print(e) #TODO print errors out to a make shift terminal
def import_file(self):
"""Import csv, Retrieve column names, create the connection, convert database to SQL.
"""
filePath = self.dlg.fileLocation.text() #Select the file path directly from the textbox
if not filePath : return #Check if not null
if self.threadRunning : return
self.dataBase = pandas.read_csv(filePath, encoding = "ISO-8859-1") #Create the pandas dataframe using .read_csv
self.dlg.columnsListWidget.addItems(self.dataBase.columns) #Add the column names to the columnsListWidget
self.create_connection('databases/dataBase.db') #Create the connection to dataBase.db
self.dataBase.to_sql("dataBase", self.conn1, if_exists='replace', index=False) #Convert to SQL database
def preview_file(self):
"""Load the database into a temporary widget to let the user 'preview' their data and ensure its correctness.
Exit function if they haven't defined a filepath.
"""
if self.dataBase is None : return
if self.threadRunning : return
model = PandasModel(self.dataBase.head(100)) #Create a model of the database, only show the top 100 rows.
self.preview = QtWidgets.QWidget() #Create the 'preview' widget
ui = TableView() #Create a TableView for the UI
ui.setupUi(self.preview, model) #Load the model into the created widget
self.preview.show() #Show the widget
self.preview.setWindowTitle("SELECT TOP 100 FROM " + self.fileName)
def assign_as_dimension(self):
"""Take the currently selected item in columnsListWidget and assign it to a dimension.
"""
if self.dlg.columnsListWidget.currentItem() != None:
self.dlg.dimensionsListWidget.addItem(self.dlg.columnsListWidget.currentItem().text()) #Add to dimensions
self.dlg.columnsListWidget.takeItem(self.dlg.columnsListWidget.currentRow()) #Remove from columns
def assign_as_measure(self):
"""Take the currently selected item in columnsListWidget and assign it to be a measure.
"""
if self.dlg.columnsListWidget.currentItem() != None:
self.dlg.measuresListWidget.addItem(self.dlg.columnsListWidget.currentItem().text()) #Add to measures
self.dlg.columnsListWidget.takeItem(self.dlg.columnsListWidget.currentRow()) #Remove from columns
def remove_dimension_assignment(self):
"""Remove the currently selected dimension and re-add it to the columnsListWidget.
"""
if self.dlg.dimensionsListWidget.currentItem() != None:
self.dlg.columnsListWidget.addItem(self.dlg.dimensionsListWidget.currentItem().text()) #Add to columns.
self.dlg.dimensionsListWidget.takeItem(self.dlg.dimensionsListWidget.currentRow()) #Remove from dimensions.
def remove_measure_assignment(self):
"""Remove the currently selected measure and re-add it to the columnsListWidget.
"""
if self.dlg.measuresListWidget.currentItem() != None:
self.dlg.columnsListWidget.addItem(self.dlg.measuresListWidget.currentItem().text()) #Add to columns.
self.dlg.measuresListWidget.takeItem(self.dlg.measuresListWidget.currentRow()) #Remove from measures.
def execute_ref_query(self):
"""Method executes the user-defined query as an SQL query against the imported database.
This new database will be Connection 2.
TODO Need to add error checking to this so it doesn't brick when the query is bogus
"""
query = self.dlg.refQueryInput.toPlainText() #Get the query from the textbox
self.queryText = query
self.targetDatabaseCondition = str(str(
sqlparse.parse(self.dlg.refQueryInput.toPlainText())[0][8]).split("WHERE")[1]).strip()
if not self.threadRunning :
if self.conn1 is None : return
self.addLineToConsole("Creating target database... ")
self.dataBaseTar = pandas.read_sql_query(query, self.conn1) #Read query using pandas method
self.create_connection2('databases/dataBaseTar.db') #Create Connected 2
self.dataBaseTar.to_sql("dataBaseTar", self.conn2, if_exists='replace', index=False) #Make database SQL
self.addRepeatingLineToConsole("Creating target database...Complete.")
if self.dlg.complementDataSetCheckbox.isChecked() :
print("was, is this fireing?")
self.addLineToConsole("Creating reference database... ")
#The user wants the reference database to be the compliment of the target database, reverse the query
complimentQuery = self.get_compliment_query()
print(complimentQuery)
self.dataBaseRef = pandas.read_sql_query(complimentQuery, self.conn1)
self.dataBaseRef.to_csv(r'C:\Users\<NAME>\Desktop\ENGG4801\ENGG4801_RyanPhelan\databases\refd.csv')
self.create_connection3('databases/dataBaseRef.db')
self.dataBaseRef.to_sql("dataBaseRef", self.conn3, if_exists='replace', index = False)
self.addRepeatingLineToConsole("Creating reference database...Complete.")
else :
if self.dynaConn is None : return
# The dynamic thread is currently running, execute on the current house's database
self.addLineToConsole("Creating target database... ")
self.dynamicTargetDatabase = pandas.read_sql_query(query, self.dynaConn)
self.create_dynamic_target_connection('databases/id' + str(self.houseId) + 'Target.db')
self.dynamicTargetDatabase.to_sql("id" + str(self.houseId) + "Target", self.dynaConnTarget,
if_exists='replace', index = False)
self.addRepeatingLineToConsole("Creating target database...Complete.")
if self.dlg.complementDataSetCheckbox.isChecked() :
self.addLineToConsole("Creating reference database... ")
#The user wants the reference database to be the complement of the target database; build the complement query
complimentQuery = self.get_compliment_query()
print(complimentQuery)
self.dynamicReferenceDatabase = pandas.read_sql_query(complimentQuery, self.dynaConn)
self.create_dynamic_reference_connection('databases/id' + str(self.houseId) + 'Reference.db')
self.dynamicReferenceDatabase.to_sql("id" + str(self.houseId) + "Reference", self.dynaConnReference,
if_exists='replace', index = False)
self.addRepeatingLineToConsole("Creating reference database...Complete.")
def get_compliment_query(self):
queryString = self.dlg.refQueryInput.toPlainText()
if queryString.find("WHERE") == -1 :
self.addLineToConsole("No 'WHERE' clause detected in query. Please try again.")
return
parsedQuery = sqlparse.parse(queryString)
statement = parsedQuery[0]
columnsWanted = str(statement[2])
tableName = str(statement[6])
whereClause = str(statement[8])
clause = whereClause.split("WHERE")[1]
sqliteValidOperators = ["==", "=", "!=", "<>", ">", "<", ">=", "<=", "!<", "!>"]
columnIdentifier = ""
clauseOperator = ""
valueComparingTo = ""
for operator in sqliteValidOperators:
if clause.find(operator) == -1:
continue
else :
columnIdentifier = clause.split(operator)[0]
clauseOperator = operator
valueComparingTo = clause.split(operator)[1]
complimentQuery = ("SELECT " + columnsWanted + " FROM " + tableName + " WHERE " + columnIdentifier
+ " NOT IN " + "( SELECT " + columnIdentifier.strip() + " FROM " + tableName + " WHERE "
+ columnIdentifier + " " + clauseOperator + " " + valueComparingTo + ")")
self.targetDatabaseCondition = str(whereClause.split("WHERE")[1].strip())
return complimentQuery
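# Rough illustration of what get_compliment_query produces (whitespace aside):
# for an input query of   SELECT col FROM dataBase WHERE speed > 5
# it returns              SELECT col FROM dataBase WHERE speed NOT IN
#                             (SELECT speed FROM dataBase WHERE speed > 5)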
def preview_target_query(self):
"""Load target into a temporary widget and'preview' the data in a pop-up widget.
Exit function if the database isn't defined.
"""
if not self.threadRunning :
if self.dataBaseTar is None : return
model = PandasModel(self.dataBaseTar.head(100)) #Create a model of the database, only get TOP 100 rows
self.preview = QtWidgets.QWidget() #Create a TableView for the UI
ui = TableView() #Create the 'preview' widget
ui.setupUi(self.preview, model) #Load the model into the created widget
self.preview.show() #Show the widget
self.preview.setWindowTitle("SELECT TOP 100 FROM Target DataSet")
else :
if self.dynamicTargetDatabase is None : return
model = PandasModel(self.dynamicTargetDatabase.head(100)) #Create a model of the database, top 100 rows
self.preview = QtWidgets.QWidget() #Create a TableView for the UI
ui = TableView() #Create the 'preview' widget
ui.setupUi(self.preview, model) #Load the model into the created widget
self.preview.show() #Show the widget
self.preview.setWindowTitle("SELECT TOP 100 FROM Target DataSet")
def preview_reference_query(self):
"""Load reference into a temporary widget and'preview' the data in a pop-up widget.
Exit function if the database isn't defined.
"""
if not self.threadRunning :
if self.dataBaseRef is None : return
model = PandasModel(self.dataBaseRef.head(100)) #Create a model of the database, top 100 rows
self.preview = QtWidgets.QWidget() #Create a TableView for the UI
ui = TableView() #Create the 'preview' widget
ui.setupUi(self.preview, model) #Load the model into the created widget
self.preview.show() #Show the widget
self.preview.setWindowTitle("SELECT TOP 100 FROM Reference DataSet")
return
else :
if self.dynamicReferenceDatabase is None : return
model = PandasModel(self.dynamicReferenceDatabase.head(100)) #Create a model of the database, top 100 rows
self.preview = QtWidgets.QWidget() #Create a TableView for the UI
ui = TableView() #Create the 'preview' widget
ui.setupUi(self.preview, model) #Load the model into the created widget
self.preview.show() #Show the widget
self.preview.setWindowTitle("SELECT TOP 100 FROM Reference DataSet")
def preview_dynamic_data(self):
if not self.threadRunning : return
else :
previewMeterData = pandas.read_sql("SELECT * FROM (SELECT * FROM id" + str(self.houseId) + " ORDER BY TimeStamp DESC LIMIT 100) ORDER BY TimeStamp", con=self.dynaConn)
model = PandasModel(previewMeterData.head(100)) #Create a model of the database, top 100 rows
self.preview = QtWidgets.QWidget() #Create a TableView for the UI
ui = TableView() #Create the 'preview' widget
ui.setupUi(self.preview, model) #Load the model into the created widget
self.preview.show() #Show the widget
self.preview.setWindowTitle("SELECT BOTTOM 100 ROWS FROM Dynamic DataSet")
def get_aggregate_functions(self):
"""Helper method to convert all selected aggregate function buttons into a list of strings.
Returns:
A list of strings, each string represents an SQL aggregate function.
"""
functions = []
# If selected, add the NAME of the button to the list
if self.dlg.avgButton.isChecked() : functions.append(self.dlg.avgButton.text()) #AVG
if self.dlg.countButton.isChecked() : functions.append(self.dlg.countButton.text()) #COUNT
if self.dlg.minButton.isChecked() : functions.append(self.dlg.minButton.text()) #MIN
if self.dlg.maxButton.isChecked() : functions.append(self.dlg.maxButton.text()) #MAX
if self.dlg.sumButton.isChecked() : functions.append(self.dlg.sumButton.text()) #SUM
return functions
def calculate_utility(self, combinedDataFrame):
total = 0
targetMinimum = combinedDataFrame[combinedDataFrame.columns[1]].min()
targetMinimum = abs(targetMinimum) if targetMinimum < 0 else 0
targetValues = [x + targetMinimum for x in combinedDataFrame[combinedDataFrame.columns[1]].values]
targetSum = sum(targetValues)
referenceMinimum = combinedDataFrame[combinedDataFrame.columns[2]].min()
referenceMinimum = abs(referenceMinimum) if referenceMinimum < 0 else 0
referenceValues = [x + referenceMinimum for x in combinedDataFrame[combinedDataFrame.columns[2]].values]
referenceSum = sum(referenceValues)
#Loop through and calculate the difference of the two scores squared
for target, reference in zip(targetValues, referenceValues):
targetNormalized = target / targetSum
referenceNormalized = reference / referenceSum
total = total + (targetNormalized - referenceNormalized)**2
return math.sqrt(total)
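# The utility above is the Euclidean distance between the two sum-normalised
# (shifted-to-non-negative) distributions; e.g. target [1, 3] vs reference [2, 2]
# normalises to [0.25, 0.75] vs [0.5, 0.5] and scores sqrt(0.25**2 + 0.25**2) ~= 0.354.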
def getKey(self, item):
return item[1]
def calculate_all_utilities(self, queriesTuple):
print("Starting : calculate_all_utilities")
self.addLineToConsole("Calculating visualization utilities...Count = " + str(len(queriesTuple)))
#The return list that will have a list of tuples containing the dataframe and its calculated utility
self.allViews = []
index = 0
for queryTar,queryRef in queriesTuple:
print(queryTar + '\n' +queryRef)
#The subset query will be the first query in the tuple. Execute on conn2
if not self.threadRunning :
targetDataframe = pandas.read_sql(queryTar, con=self.conn2)
newColName1 = targetDataframe.columns[1] + " tar"
targetDataframe.columns = [targetDataframe.columns[0], newColName1]
if self.dlg.complementDataSetCheckbox.isChecked() :
referenceDataFrame = pandas.read_sql(queryRef, con=self.conn3)
import json
import logging
import os
from itertools import compress
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
roc_auc_score,
)
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Conv1D, Dense, Dropout, Embedding, GlobalMaxPool1D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.preprocessing.text import Tokenizer
from tags_classifier_library.settings import (
EMBEDDING_DIM,
MAX_NB_WORDS,
MAX_SEQUENCE_LENGTH,
PROBABILITY_THRESHOLD,
)
logger = logging.getLogger(__name__)
def report_metric_per_model(actual, predict, average_type="binary"):
precisions = precision_score(actual, predict, average=average_type)
recalls = recall_score(actual, predict, average=average_type)
f1 = f1_score(actual, predict, average=average_type)
accuracy = accuracy_score(actual, predict)
auc = roc_auc_score(actual, predict)
logger.info(f"Precision = {precisions}")
logger.info(f"Recall = {recalls}")
logger.info(f"f1 = {f1}")
logger.info(f"Accuracy = {accuracy}")
logger.info(f"AUC = {auc}")
return precisions, recalls, f1, accuracy, auc
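# Hedged usage sketch with toy binary labels (not part of the original module):
#   y_true = [0, 1, 1, 0, 1]
#   y_pred = [0, 1, 0, 0, 1]
#   p, r, f1, acc, auc = report_metric_per_model(y_true, y_pred)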
def report_metrics_for_all(tag_size, tag_precisions, tag_recalls, tag_f1, tag_accuracy, tag_auc):
size_df = pd.DataFrame.from_dict(tag_size, orient="index")
size_df = size_df.rename(columns={0: "size"})
size_df = size_df[["size"]]
precisions_df = pd.DataFrame.from_dict(tag_precisions, orient="index")
precisions_df = precisions_df.rename(columns={0: "precisions"})
recalls_df = pd.DataFrame.from_dict(tag_recalls, orient="index")
recalls_df = recalls_df.rename(columns={0: "recalls"})
f1_df = pd.DataFrame.from_dict(tag_f1, orient="index")
f1_df = f1_df.rename(columns={0: "f1"})
accuracy_df = pd.DataFrame.from_dict(tag_accuracy, orient="index")
accuracy_df = accuracy_df.rename(columns={0: "accuracy"})
auc_df = pd.DataFrame.from_dict(tag_auc, orient="index")
# This file is intended to provide some "reference information" in a useful form for python.
# The names of each run in a family (as defined in the families described in the simulation releases)
# are provided in a dictionary; their most-likely most-relevant comparison run is included as well.
# The file also provides some additional dictionaries, which may group together given families that
# touch on very related points; an example is the 'intranight' family, which includes some simulations
# looking at exposure time within a visit as well as whether to add a third visit - all of these relate
# to the survey strategy within a night.
# Each grouping or family only includes runs from a single release, that can use a single comparison run.
# Data / dictionaries containing the family information include:
# family - comment - nicknames - family_baseline - family_version
# Much of the text in this file is intended to be used in jupyter notebooks and thus is markdown.
import numpy as np
import pandas as pd
from IPython.display import display_markdown
import matplotlib.pyplot as plt
### METRICS FOR SHORT DISPLAY
tablemetrics = ['fOArea fO All visits HealpixSlicer',
'Effective Area (deg) ExgalM5_with_cuts i band non-DD year 10 HealpixSlicer',
# 'Nstars_no_crowding y HealpixSlicer',
'Nvisits All',
'fONv MedianNvis fO All visits HealpixSlicer',
'Median NVisits u band HealpixSlicer',
'Median NVisits g band HealpixSlicer',
'Median NVisits r band HealpixSlicer',
'Median NVisits i band HealpixSlicer',
'Median NVisits z band HealpixSlicer',
'Median NVisits y band HealpixSlicer', ]
tablenames = ['Area with >825 visits/pointing',
'Unextincted area i>25.9',
# 'Nstars y band (no crowding)',
'Nvisits total',
'Median Nvisits over best 18k',
'Median Nvis u band',
'Median Nvis g band',
'Median Nvis r band',
'Median Nvis i band',
'Median Nvis z band',
'Median Nvis y band']
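# A small display helper (sketch -- assumes the two lists above stay in lockstep):
# map the long MAF metric names to the short labels, e.g. for renaming summary columns.
#   short_names = dict(zip(tablemetrics, tablenames))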
class FamilyInfo():
"""A class to hold some high-level documentation of the available simulation runs.
(making this a class rather than just a file makes it a little clearer to use in a notebook).
"""
def __init__(self):
"""Where we set all the run information."""
# Currently relevant runs from these releases will be included below
self.sim_versions = ['1.5', '1.6', '1.7']
# Overall baseline runs in each release.
self.baselines_versions = {'1.5': 'baseline_v1.5_10yrs', # 1x30s visits
'1.6': 'baseline_nexp2_v1.6_10yrs', # 2x15s visits
'1.7': 'baseline_nexp2_v1.7_10yrs' # 2x15s visits
}
# The simulations included in the survey strategy grouping
self.family = {}
# An overall comment about the grouping
self.comment = {}
# Potentially useful nicknames or overwhelmingly brief descriptors -- USE THESE SPARINGLY
# By using the nickname instead of the full name, you lose traceability for which run is really which.
self.nicknames = {}
# What is the most-useful comparison run for this grouping
self.family_baseline = {}
# The release number for this grouping
self.family_version = {}
# Release notes for each of the 1.5, 1.6 and 1.7 families:
# https://community.lsst.org/t/fbs-1-5-release-may-update-bonus-fbs-1-5-release/
# https://community.lsst.org/t/fbs-1-6-release-august-2020/
# https://community.lsst.org/t/survey-simulations-v1-7-release-january-2021/
# baselines -- this particular 'family' needs to be used differently than the rest.
key = 'version_baselines'
c = f"**{key}** = Comparison baselines across each release. The major differences across 1.5, 1.6 to " \
f"1.7 is whether we decide the default baseline for the release is 2x15s or 1x30s; however, " \
f"in each release we have created both a 2x15s and a 1x30s baseline -- so any subtle " \
f"changes due to updates in the telescope model or scheduler code can be evaluated by " \
f"looking at the same baseline versions. The trickier part of evaluating these at the " \
f"same time is that the results are likely contained in different dataframes in python " \
f"(if the big_1.5.csv, big_1.6.csv and big_1.7.csv files were used)."
self.comment[key] = c
self.family[key] = ['baseline_2snaps_v1.5_10yrs',
'baseline_v1.5_10yrs',
'baseline_nexp2_v1.6_10yrs',
'baseline_nexp1_v1.6_10yrs',
'baseline_nexp2_v1.7_10yrs',
'baseline_nexp1_v1.7_10yrs']
self.nicknames[key] = ['Baseline 2x15s v1.5',
'Baseline 1x30s v1.5',
'Baseline 2x15s v1.6',
'Baseline 1x30s v1.6',
'Baseline 2x15 v1.7',
'Baseline 1x30s v1.7']
self.family_baseline[key] = 'baseline_nexp1_v1.7_10yrs'
self.family_version[key] = 'All'
### visit_time
key = 'visit_time'
c = f"**{key}** = simulations bearing on the length of the individual visits. " \
"This demonstrates the impact of 1x30s vs. 2x15s visits (9% more visits). " \
"The variable exposure run allows the exposure time per visit to vary " \
"between 20-100 seconds to attempt to hold the single image visit depth roughly constant, " \
"but results in slightly fewer visits overall (although more visits per pointing within the WFD)."
self.comment[key] = c
self.family[key] = ['baseline_v1.5_10yrs',
'baseline_2snaps_v1.5_10yrs',
'var_expt_v1.5_10yrs']
self.nicknames[key] = ['1x30s',
'2x15s',
'1xVariable']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
### u_long
key = 'u_long'
c = f"**{key}** = simulations bearing on the length of the u-band exposure time. " \
f"These simulations swap a single exposure per visit of variable length for each visit in *u* " \
f"band. The number of *u* band visits is left unchanged, resulting in a shift of visits from " \
f"other filters to compensate for the increase in time. (Note the DDF visits were left " \
f"unchanged at 2x15s each)." \
f"<br>\n" \
"There is an additional u_long simulation in v1.5 called `u60_v1.5_10yrs`; " \
"the u60 v1.5 simulation uses 2x30s u band visits but cuts the number of visits in half " \
"rather than maintaining the number of u-band visits as the family below does. " \
"Halving u visits was a no-go for transient science (shown in the TDE metric), and so is " \
"dropped here. "
self.comment[key] = c
self.family[key] = ['baseline_nexp2_v1.7_10yrs',
'u_long_ms_30_v1.7_10yrs',
'u_long_ms_40_v1.7_10yrs',
'u_long_ms_50_v1.7_10yrs',
'u_long_ms_60_v1.7_10yrs']
self.nicknames[key] = ['u 2x15s',
'u 1x30s',
'u 1x40s',
'u 1x50s',
'u 1x60s']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
## intranight
key = 'intranight'
c = f"**{key}** = simulations bearing on the distribution of visits within a night. " \
"Snaps per visit (2x15s vs 1x30s) is included for completeness, but the other " \
"simulations include variations on whether visits are in the same or mixed filters, " \
"and the effect of devoting a fraction of time to obtaining an additional (third) visit " \
"per night. " \
"<br>\n" \
"In the `third_obs` simulations, we add a third visit per night to augment the pairs of " \
"visits, by adding a *g*, *r*, *i* or *z* visit at the end of the night in the WFD. The " \
"amount of the night dedicated to obtaining this third visit at the end of the night varies " \
"across the family, from 15 minutes to 120 minutes (corresponding to covering approximately " \
"one blob to about five, or half of the night's pairs receiving a third visit). We find " \
"the third visit decreases the amount of sky imaged in each night and has an accompanying " \
"negative impact on metrics which prefer more sky area within a given time (such as solar " \
"system discovery and slower transient metrics, such as SNIa) -- the amount of this " \
"impact varies from negligible to noticeable depending on how much time is allocated " \
"to the third visit."
self.comment[key] = c
self.family[key] = ['baseline_2snaps_v1.5_10yrs',
'baseline_v1.5_10yrs',
'baseline_samefilt_v1.5_10yrs',
'third_obs_pt15v1.5_10yrs',
'third_obs_pt30v1.5_10yrs',
'third_obs_pt45v1.5_10yrs',
'third_obs_pt60v1.5_10yrs',
'third_obs_pt90v1.5_10yrs',
'third_obs_pt120v1.5_10yrs']
self.nicknames[key] = ['2x15s visits mixed filters',
'1x30s visits mixed filters',
'1x30s visits same filter',
'third visits for 15 min',
'third visits for 30 min',
'third visits for 45 min',
'third visits for 60 min',
'third visits for 90 min',
'third visits for 120 min']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
## pair_times
key = 'pair_times'
c = f"**{key}** = these simulations explore the impact of varying the time between pairs of visits " \
f"in a night. Varying the pair time changes the overall number of filter changes per night, " \
f"so longer pair times result in more visits overall in the survey. Longer pair times are more " \
f"vulnerable to interruption however, resulting in a lower fraction of visits occuring in pairs. " \
f"This family is related to the intranight family, but is from v1.7 so must be considered " \
f"separately. The standard baseline attempts pairs at 22 minutes."
self.comment[key] = c
self.family[key] = ['pair_times_11_v1.7_10yrs',
'pair_times_22_v1.7_10yrs',
'pair_times_33_v1.7_10yrs',
'pair_times_44_v1.7_10yrs',
'pair_times_55_v1.7_10yrs',
'baseline_nexp2_v1.7_10yrs']
self.nicknames[key] = ['11 minute pairs',
'22 minute pairs (baseline)',
'33 minute pairs',
'44 minute pairs',
'55 minute pairs',
'Baseline']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
## twilight pairs
key = 'twilight_pairs'
c = f"**{key}** = explore the effect of programming twilight observing in pairs, rather than " \
f"single visits. The baseline chooses visits during twilight (-18 to -12 degrees solar " \
f"altitude) one at a time using a greedy algorithm. This family programs visits during " \
f"twilight in pairs, similarly to the remainder of the night but with a shorter return " \
f"interval of 15 minutes. In some simulations, visits during morning twilight are " \
f"preferentially chosen to be areas of the sky already observed earlier in the night. " \
f"Depending on the simulation, pairs are taken in the same filter (r+r, i+i, z+z, or y+y), " \
f"or mixed filters (r+i, i+z, z+y or y+y)."
self.comment[key] = c
self.family[key] = ['baseline_nexp2_v1.7_10yrs',
'twi_pairs_v1.7_10yrs',
'twi_pairs_mixed_v1.7_10yrs',
'twi_pairs_repeat_v1.7_10yrs',
'twi_pairs_mixed_repeat_v1.7_10yrs',
]
self.nicknames[key] = ['Baseline (greedy)',
'Twi pairs same filter',
'Twi pairs mixed filters',
'Twi pairs same filter, repeat area',
'Twi pairs mixed filters, repeat area']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
## wfddepth
key = 'wfd_depth'
c = f"**{key}** = evaluates the impact of scaling the fraction of survey time devoted to WFD (and thus " \
f"the number of visits per pointing in the WFD) up or down. For metrics which respond simply " \
f"to number of visits, this is a useful family to demonstrate that effect. Metrics which " \
f"require coverage outside the WFD but are still sensitive to number of visits will show more " \
f"complicated behavior as coverage in the NES and GP is reduced or increased. When the DDF " \
f"fields are present, they are allocated 5% of the available survey time." \
f"<br>\n " \
f"From these simulations we determined that between 1.65 and 1.7M visits are required to " \
f"cover 18K square degrees of the standard WFD to a minimum number of visits of 825 per " \
f"pointing; some of the range in that required number of visits comes from over and under " \
f"subscription in some parts of the sky, which leads to unevenness in coverage. "
self.comment[key] = c
self.family[key] = ['baseline_v1.5_10yrs',
'wfd_depth_scale0.65_noddf_v1.5_10yrs',
'wfd_depth_scale0.70_noddf_v1.5_10yrs',
'wfd_depth_scale0.75_noddf_v1.5_10yrs',
'wfd_depth_scale0.80_noddf_v1.5_10yrs',
'wfd_depth_scale0.85_noddf_v1.5_10yrs',
'wfd_depth_scale0.90_noddf_v1.5_10yrs',
'wfd_depth_scale0.95_noddf_v1.5_10yrs',
'wfd_depth_scale0.99_noddf_v1.5_10yrs',
'wfd_depth_scale0.65_v1.5_10yrs',
'wfd_depth_scale0.70_v1.5_10yrs',
'wfd_depth_scale0.75_v1.5_10yrs',
'wfd_depth_scale0.80_v1.5_10yrs',
'wfd_depth_scale0.85_v1.5_10yrs',
'wfd_depth_scale0.90_v1.5_10yrs',
'wfd_depth_scale0.95_v1.5_10yrs',
'wfd_depth_scale0.99_v1.5_10yrs'
]
self.nicknames[key] = ['Baseline', '65% no DDF', '70% no DDF', '75% noDDF', '80% no DDF',
'85% no DDF', '90% no DDF', '95% noDDF', '99% no DDF',
'65%', '70%', '75%', '80%', '85%', '90%', '95%', '99%']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
## filter_dist
key = 'filter_dist'
c = f"**{key}** = evaluate the impact of changing the balance of visits between filters. " \
"Note that this family uses a simplified footprint that is a simple stripe of Declination " \
"corresponding to the traditional WFD declination limits but no NES or SCP and continuing " \
"WFD over the GP. Generally we find transients and variable stars metrics favor bluer " \
"distributions of filters while solar system and galaxy metrics prefer redder " \
"distributions of filters (particularly *i* band). <br> " \
" ** the comparison run for this family is NOT the baseline, but rather one of the filter_dist " \
"family, to avoid differences due to the footprint change. "
self.comment[key] = c
self.family[key] = ['filterdist_indx1_v1.5_10yrs',
'filterdist_indx2_v1.5_10yrs',
'filterdist_indx4_v1.5_10yrs',
'filterdist_indx3_v1.5_10yrs',
'filterdist_indx6_v1.5_10yrs',
'filterdist_indx5_v1.5_10yrs',
'filterdist_indx7_v1.5_10yrs',
'filterdist_indx8_v1.5_10yrs', ]
self.nicknames[key] = ['Uniform',
'Baseline-like',
'u heavy',
'g heavy',
'i heavy',
'z and y heavy',
'Bluer',
'Redder']
self.family_baseline[key] = 'filterdist_indx2_v1.5_10yrs'
self.family_version[key] = '1.5'
### footprint (stage 1)
key = 'footprint'
c = f"**{key}** = an initial large set of simulations exploring widely different options for the " \
"overall survey footprint. The fraction of time devoted to the WFD will necessarily vary " \
"among these simulations; this is one of the parameters suitable for later fine-tuning. " \
"All runs in this family use 1x30s visits. Some of these footprints contain a traditional " \
"WFD footprint, while others contain a shifted/extended N/S footprint that sometimes includes " \
"a dust extinction limit around the galactic plane and sometimes a simple galactic latitude " \
"cutoff; the bulges family is included because without significant galactic bulge coverage " \
"metrics related to MW populations fell significantly, yet the time requirement to cover all " \
"of the relevant areas together must be considered."
self.comment[key] = c
self.family[key] = ['filterdist_indx2_v1.5_10yrs',
'baseline_v1.5_10yrs',
'footprint_standard_goalsv1.5_10yrs',
'footprint_bluer_footprintv1.5_10yrs',
'footprint_no_gp_northv1.5_10yrs',
'footprint_gp_smoothv1.5_10yrs',
'footprint_add_mag_cloudsv1.5_10yrs',
'footprint_big_sky_dustv1.5_10yrs',
'footprint_big_skyv1.5_10yrs',
'footprint_big_sky_nouiyv1.5_10yrs',
'footprint_big_wfdv1.5_10yrs',
'footprint_newAv1.5_10yrs',
'footprint_newBv1.5_10yrs',
'bulges_bs_v1.5_10yrs',
'bulges_cadence_bs_v1.5_10yrs',
'bulges_bulge_wfd_v1.5_10yrs',
'bulges_cadence_bulge_wfd_v1.5_10yrs',
'bulges_i_heavy_v1.5_10yrs',
'bulges_cadence_i_heavy_v1.5_10yrs',
]
self.nicknames[key] = ['no nes',
'standard baseline',
'standard baseline again',
'bluer filters',
'no GP N extension',
'GP at WFD level',
'add MagClouds',
'big sky, dust-no GP',
'big sky - no GP',
'big sky - no GP, no uiy',
'big WFD',
'big sky + GP1 + NES',
'big sky + GP2 + NES',
'big sky + GP bulge',
'big sky + GP bulge cadenced',
'big sky + GP bulge @ WFD',
'big sky + GP bulge @WFD cadenced',
'big sky + GP i heavy',
'big sky + GP i heavy cadenced']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
## footprint_tune
key = 'footprint_tune'
c = f"**{key}** = a further exploration of the survey footprint, exploring options " \
"for improving the basic setup used in v1.6 in the 'combo_dust' simulation. " \
"We feel combo_dust had a lot of promise, as it improved metrics for a wide range " \
"of science without impacting metrics (currently avaialable in MAF) dramatically. " \
"However, combo_dust had some issues, especially in terms of contingency available in " \
"the WFD area (in case of exceptionally bad weather, it could fail SRD, and " \
"as it was only one run, it did not offer variations to attempt to improve MWLV or TVS " \
"science, for example). This footprint_tune family offers additional variations on " \
"the combo_dust (shifted WFD with dust-extinction-limits) simulation. Generally the WFD here " \
"is a dust-extinction limited footprint, but with variable N/S limits. We vary the coverage on " \
"the remaining sky. " \
"Because this is a v1.7 run, it cannot be compared directly with the footprint family above. " \
"(they use 2x15s visits, not 1x30s). "
self.comment[key] = c
self.family[key] = ['baseline_nexp2_v1.7_10yrs',
'footprint_0_v1.710yrs',
'footprint_1_v1.710yrs',
'footprint_2_v1.710yrs',
'footprint_3_v1.710yrs',
'footprint_4_v1.710yrs',
'footprint_5_v1.710yrs',
'footprint_6_v1.710yrs',
'footprint_7_v1.710yrs',
'footprint_8_v1.710yrs']
self.nicknames[key] = ['Traditional footprint',
'WFD -70.2<dec<7.8 + north ring', # 0
'WFD -70.2<dec<7.8', # 1
'WFD -67.4<dec<8', # 2
'WFD -67.4<dec8 + 20deg bridge', # 3
'WFD -62.5<dec<3.6 +33deg bridge', # 4
'WFD -67.4<dec<8 + 20deg bridge (like 3)', # 5
'WFD -67.4<dec<8 + 20deg bridge (south)', # 6
'WFD -70.2<dec<7.8 no ecliptic in galaxy', # 7
'WFD -70.2<dec<7.8 no galactic', # 8
]
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
### filter_cadence
key = 'filter_cadence'
c = f"**{key}** = investigate the impact of reducing the gaps between g band visits over the month, " \
f"(essentially down-weighting the lunar cycle by adding a requirement that fields receive " \
f"visits in g band filter throughout each month). In order to avoid 'long gaps' in g band " \
f"coverage, additional fill-in visits in g are requested in each night; there is a limit to " \
f"the number of fill-in visits in each night allowed, and these fill-in visits can be " \
f"requested as contiguous blobs or non-contiguous pointings. The goal is to improve transient " \
f"discovery for longer timescale transients which require bluer filter coverage (like SN)."
self.comment[key] = c
self.family[key] = ['baseline_nexp2_v1.7_10yrs',
'cadence_drive_gl30v1.7_10yrs',
'cadence_drive_gl30_gcbv1.7_10yrs',
'cadence_drive_gl100v1.7_10yrs',
'cadence_drive_gl100_gcbv1.7_10yrs',
'cadence_drive_gl200v1.7_10yrs',
'cadence_drive_gl200_gcbv1.7_10yrs',
]
self.nicknames[key] = ['Baseline',
'Add g, limit 30/night, non-contiguous',
'Add g, limit 30/night, contiguous',
'Add g, limit 100/night, non-contiguous',
'Add g, limit 100/night, contiguous',
'Add g, limit 200/night, non-contiguous',
'Add g, limit 200/night, contiguous']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
key = 'alt_rolling'
c = f"**{key}** = a family of simulations that add alt-sched like nightly variations between " \
f"the northern and southern portions of the sky. For some members of the family, a 2-band " \
f"rolling cadence is also included (if 'roll' in the simulation name). Note that ALL of " \
f"these footprints are a shifted WFD - extended N/S and using dust extinction to delineate " \
f"the galactic plane. Few visits are placed into the GP or the NES or SCP. <br>" \
f"The baseline for this run is tricky to pick; the standard baseline is a very different " \
f"footprint, yet all runs in this family either add rolling OR add the alt-sched N/S " \
f"modulation. For purposes of identifying the effects of the alt-sched algorithm, the run " \
f"without alt-sched is chosen as the baseline.<br>"
self.comment[key] = c
self.family[key] = ['alt_dust_v1.5_10yrs',
'alt_roll_mod2_dust_sdf_0.20_v1.5_10yrs',
'roll_mod2_dust_sdf_0.20_v1.5_10yrs']
self.nicknames[key] = ['Alt-sched modulation, no rolling',
'Alt-sched modulation, plus rolling',
'Standard sched, plus rolling']
self.family_baseline[key] = 'roll_mod2_dust_sdf_0.20_v1.5_10yrs'
self.family_version[key] = '1.5'
## rolling
key = 'rolling'
c = f"**{key}** = Add a rolling cadence, where some parts of the sky receive a higher number " \
f"of visits during an 'on' season, followed by a lower number of visits during an 'off' " \
f"season. During the first year and half, and the last year and half (or so), the sky is " \
f"covered uniformly as normal. This 'intro' and 'outro' allows for better proper motion " \
f"coverage, and is 1.5 years instead of 1 to allow the entire sky to receive even coverage " \
f"during that period. This leaves 6 years for 'rolling'; simulations either split the sky " \
f"into 2 or 3 declination-based regions which are covered either every other season or " \
f"every third season. Each of the active regions is actually composed of 2 sub-section in " \
f"the North and South, to spread follow-up requirements over the sky. Some simulations add " \
f"an every-other nightly modulation between this northern and southern sub-section; some " \
f"do not although this may happen to some extent due to 'blob' coverage."
self.comment[key] = c
self.family[key] = ['baseline_nexp2_v1.7_10yrs',
'rolling_nm_scale0.2_nslice2_v1.7_10yrs',
'rolling_nm_scale0.4_nslice2_v1.7_10yrs',
'rolling_nm_scale0.6_nslice2_v1.7_10yrs',
'rolling_nm_scale0.8_nslice2_v1.7_10yrs',
'rolling_nm_scale0.9_nslice2_v1.7_10yrs',
'rolling_nm_scale1.0_nslice2_v1.7_10yrs',
'rolling_nm_scale0.2_nslice3_v1.7_10yrs',
'rolling_nm_scale0.4_nslice3_v1.7_10yrs',
'rolling_nm_scale0.6_nslice3_v1.7_10yrs',
'rolling_nm_scale0.8_nslice3_v1.7_10yrs',
'rolling_nm_scale0.9_nslice3_v1.7_10yrs',
'rolling_nm_scale1.0_nslice3_v1.7_10yrs',
'rolling_scale0.2_nslice2_v1.7_10yrs',
'rolling_scale0.4_nslice2_v1.7_10yrs',
'rolling_scale0.6_nslice2_v1.7_10yrs',
'rolling_scale0.8_nslice2_v1.7_10yrs',
'rolling_scale0.9_nslice2_v1.7_10yrs',
'rolling_scale1.0_nslice2_v1.7_10yrs',
'rolling_scale0.2_nslice3_v1.7_10yrs',
'rolling_scale0.4_nslice3_v1.7_10yrs',
'rolling_scale0.6_nslice3_v1.7_10yrs',
'rolling_scale0.8_nslice3_v1.7_10yrs',
'rolling_scale0.9_nslice3_v1.7_10yrs',
'rolling_scale1.0_nslice3_v1.7_10yrs']
self.nicknames[key] = ['Baseline',
'No Modulation, 0.2 strength, 2 band',
'No Modulation, 0.4 strength, 2 band',
'No Modulation, 0.6 strength, 2 band',
'No Modulation, 0.8 strength, 2 band',
'No Modulation, 0.9 strength, 2 band',
'No Modulation, 1.0 strength, 2 band',
'No Modulation, 0.2 strength, 3 band',
'No Modulation, 0.4 strength, 3 band',
'No Modulation, 0.6 strength, 3 band',
'No Modulation, 0.8 strength, 3 band',
'No Modulation, 0.9 strength, 3 band',
'No Modulation, 1.0 strength, 3 band',
'0.2 strength, 2 band',
'0.4 strength, 2 band',
'0.6 strength, 2 band',
'0.8 strength, 2 band',
'0.9 strength, 2 band',
'1.0 strength, 2 band',
'0.2 strength, 3 band',
'0.4 strength, 3 band',
'0.6 strength, 3 band',
'0.8 strength, 3 band',
'0.9 strength, 3 band',
'1.0 strength, 3 band']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
# MINISURVEYS
## twilight_neo
key = 'twilight_neo'
c = f"**{key}** = explore the impact of adding a twilight NEO survey, operating on various " \
f"timescales and thus requiring varying fraction of survey time. These twilight NEO surveys " \
f"replace the set initially released in v1.5, improving the twilight NEO mini-survey " \
f"performance for NEOs by restricting visits to low solar " \
f"elongations. Twilight NEO visits are 1 second long, in r,i, and z filters."
self.comment[key] = c
self.family[key] = ['twi_neo_pattern1_v1.7_10yrs',
'twi_neo_pattern2_v1.7_10yrs',
'twi_neo_pattern3_v1.7_10yrs',
'twi_neo_pattern4_v1.7_10yrs',
'twi_neo_pattern5_v1.7_10yrs',
'twi_neo_pattern6_v1.7_10yrs',
'twi_neo_pattern7_v1.7_10yrs',
'baseline_nexp2_v1.7_10yrs']
self.nicknames[key] = ['On every night',
'On every other night',
'On every third night',
'On every fourth night',
'On for 4 nights, off for 4 nights',
'On for 3 nights, off for 4 nights',
'On for 2 nights, off for 4 nights',
'Baseline (none)']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
## shortexp
key = 'shortexp'
c = f"**{key}** = explore the impact of adding 2 or 5 short exposures of 1 or 5 seconds each year " \
f"(in all 6 filters). The number of visits in the entire survey increases -- but some will " \
f"be too short to be useful for some science -- the amount of time used for the mini-survey " \
f"varies in each of these examples, from 0.5% to 5%."
self.comment[key] = c
self.family[key] = ['short_exp_2ns_1expt_v1.5_10yrs',
'short_exp_2ns_5expt_v1.5_10yrs',
'short_exp_5ns_1expt_v1.5_10yrs',
'short_exp_5ns_5expt_v1.5_10yrs',
'baseline_v1.5_10yrs']
self.nicknames[key] = ['2/yr x 1s',
'2/yr x 5s',
'5/yr x 1s',
'5/yr x 5s',
'Baseline (none)']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
## dcr
key = 'dcr'
c = f"**{key}** = explore the impact of adding 1 or 2 high-airmass visits in various bandpasses " \
f"each year, for the purpose of better-measuring differential chromatic refraction (helping " \
f"with AGN redshifts and the creation of difference image templates). "
self.comment[key] = c
self.family[key] = ['dcr_nham1_ug_v1.5_10yrs',
'dcr_nham1_ugr_v1.5_10yrs',
'dcr_nham1_ugri_v1.5_10yrs',
'dcr_nham2_ug_v1.5_10yrs',
'dcr_nham2_ugr_v1.5_10yrs',
'dcr_nham2_ugri_v1.5_10yrs',
'baseline_v1.5_10yrs']
self.nicknames[key] = ['1/yr in ug',
'1/yr in ugr',
'1/yr in ugri',
'2/yr in ug',
'2/yr in ugr',
'2/yr in ugri',
'Baseline (none)']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
## good_seeing
key = 'good_seeing'
c = f"**{key}** = explore the effect of prioritizing achieving at least 1 'good seeing' image " \
f"in the specified bandpasses in each year. These simulations do improve the seeing " \
f"distributions in the targeted bands, compared to baseline -- this improvement is most " \
f"visible when comparing the achieved IQ against the standard baseline, within a given year. "
self.comment[key] = c
self.family[key] = ['goodseeing_i_v1.5_10yrs',
'goodseeing_gi_v1.5_10yrs',
'goodseeing_gz_v1.5_10yrs',
'goodseeing_gri_v1.5_10yrs',
'goodseeing_griz_v1.5_10yrs',
'baseline_v1.5_10yrs', ]
self.nicknames[key] = ['good i band',
'good gi bands',
'good gz bands',
'good gri bands',
'good griz bands',
'baseline - none']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
## spiders
key = 'spiders'
c = f"**{key}** = This example simulation explores rotating the camera so that diffraction " \
f"spikes are aligned with the X/Y directions of the CCD, to reduce artifacts in " \
f"difference imaging."
self.comment[key] = c
self.family[key] = ['spiders_v1.5_10yrs', 'baseline_v1.5_10yrs']
self.nicknames[key] = ['Spiders Aligned', 'Random orientation']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
### DDF families
# ddf sequences
key = 'ddf'
c = f"**{key}** = Vary the sequences for DDF fields. The amount of time per DDF field varies " \
f"between some of these simulations."
self.comment[key] = c
self.family[key] = ['agnddf_v1.5_10yrs',
'descddf_v1.5_10yrs',
'daily_ddf_v1.5_10yrs',
'baseline_v1.5_10yrs']
self.nicknames[key] = ['AGN sequences',
'DESC sequences',
'Daily sequences',
'Baseline']
self.family_baseline[key] = 'baseline_v1.5_10yrs'
self.family_version[key] = '1.5'
### DDF dithers
key = 'ddf_dithers'
c = f"**{key}** = Vary the translational dither offsets in the DDFs, from 0 to 2.0 degrees. " \
f"Smaller dithers will help the overall depth and uniformity, but larger dithers may be " \
f"needed for calibration."
self.comment[key] = c
self.family[key] = ['ddf_dither0.00_v1.7_10yrs',
'ddf_dither0.05_v1.7_10yrs',
'ddf_dither0.10_v1.7_10yrs',
'ddf_dither0.30_v1.7_10yrs',
'ddf_dither0.70_v1.7_10yrs',
'ddf_dither1.00_v1.7_10yrs',
'ddf_dither1.50_v1.7_10yrs',
'ddf_dither2.00_v1.7_10yrs',
'baseline_nexp2_v1.7_10yrs']
self.nicknames[key] = ['0 dither',
'0.05 deg dither',
'0.10 deg dither',
'0.30 deg dither',
'0.70 deg dither',
'1.00 deg dither',
'1.50 deg dither',
'2.00 deg dither',
'Baseline (0.70 deg)']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
key = 'euclid_dithers'
c = f"**{key}** = vary the translational dither offsets to fill in the Euclid DDF footprint, as the " \
f"Euclid field is a double pointing for Rubin. These simulation vary the spatial dither both " \
f"towards the second pointing and perpendicular to the second pointing. The perpendicular " \
f"dithering is relatively small (and symmetric 'up' and 'down'). The dithering along the " \
f"footprint ('direct') is larger and non-symmetric, with a smaller dither 'away' from the " \
f"second pointing and a larger dither 'toward' the second pointing. (offsets are in degrees)."
self.comment[key] = c
self.family[key] = ['euclid_dither1_v1.7_10yrs',
'euclid_dither2_v1.7_10yrs',
'euclid_dither3_v1.7_10yrs',
'euclid_dither4_v1.7_10yrs',
'euclid_dither5_v1.7_10yrs',
'baseline_nexp2_v1.7_10yrs']
self.nicknames[key] = ['Direct -0.25/+1.0, Perp +/-0.25 (deg)',
'Direct -0.1/+1.0, Perp +/-0.25 (deg)',
'Direct -0.25/+1.0, Perp +/-0.10 (deg)',
'Direct -0.25/+1.5, Perp +/-0.25 (deg)',
'Direct -0.25/+0.75, Perp +/-0.25 (deg)',
'Baseline (random)']
self.family_baseline[key] = 'baseline_nexp2_v1.7_10yrs'
self.family_version[key] = '1.7'
## V1.6 potential schedulers (v16)
key = 'potential_schedulers'
c = f"**{key}** = A series of simulations where we vary *multiple* survey strategies at once, " \
"trying to combine the survey strategies that seemed useful to us (at the time anyway) to " \
"reach a particular science goal. These simulations are like cross-sections of the families, " \
"using bits and pieces to try to reach goals, rather than explore the impact of the various " \
"survey strategy changes individually. Each simulation is repeated for 2x15s visits and " \
"for 1x30s visits. " \
"<br>\n" \
"The point here is to illustrate the effect of combinations of survey strategy variations; " \
"some are successful and sometimes we may meet technical goals but not science goals. For " \
"further details on each simulation, Section 5 in the cadence report for the " \
"SCOC (https://pstn-051.lsst.io/) is recommended. "
self.comment[key] = c
self.family[key] = ['barebones_nexp2_v1.6_10yrs',
'barebones_v1.6_10yrs',
'baseline_nexp2_scaleddown_v1.6_10yrs',
'baseline_nexp2_v1.6_10yrs',
'baseline_nexp1_v1.6_10yrs',
'combo_dust_nexp2_v1.6_10yrs',
'combo_dust_v1.6_10yrs',
'ddf_heavy_nexp2_v1.6_10yrs',
'ddf_heavy_v1.6_10yrs',
'dm_heavy_nexp2_v1.6_10yrs',
'dm_heavy_v1.6_10yrs',
'mw_heavy_nexp2_v1.6_10yrs',
'mw_heavy_v1.6_10yrs',
'rolling_exgal_mod2_dust_sdf_0.80_nexp2_v1.6_10yrs',
'rolling_exgal_mod2_dust_sdf_0.80_v1.6_10yrs',
'ss_heavy_nexp2_v1.6_10yrs',
'ss_heavy_v1.6_10yrs']
self.nicknames[key] = ['WFD only 2x15s', 'WFD only 1x30s',
'Baseline 2x15s adjusted WFD fraction', 'Baseline 2x15s', 'Baseline 1x30s',
'Combo dust 2x15s', 'Combo dust 1x30s',
'DDF 13.4% 2x15s', 'DDF 13.4% 1x30s',
'DM heavy 2x15s', 'DM heavy 1x30s',
'Bulge/MC @ WFD 2x15s', 'Bulge/MC @ WFD 1x30s',
'Shifted rolling WFD 2x15s', 'Shifted rolling WFD 1x30s',
'Twilight pairs + NEO 2x15s', 'Twilight pairs + NEO 1x30s']
self.family_baseline[key] = 'baseline_nexp1_v1.6_10yrs'
self.family_version[key] = '1.6'
"""
## filtercadence
key = 'even_filters'
c = f"**{key}** = choose to observe in bluer bandpasses even when the moon is full. This was the first " \
f"round of simulations to investigate this effect - see also the 'cadence_drive' family. "
self.family[key] = ['even_filters_alt_g_v1.6_10yrs',
'even_filters_altv1.6_10yrs',
'even_filters_g_v1.6_10yrs',
'even_filtersv1.6_10yrs',
'baseline_nexp1_v1.6_10yrs'
]
self.nicknames[key] = self.family[key]
self.family_baseline[key] = 'baseline_nexp1_v1.6_10yrs'
self.family_version[key] = '1.6'
self.comment['greedy'] = 'greedy = look at the impact of changing the `greedy` visits during ' \
'twilight to exclude the ecliptic. This should push all ecliptic visits ' \
'into pairs. This did not make a significant difference, and now we have ' \
'simulations that actually program pairs during twilight. '
self.family['greedy'] = ['greedy_footprint_v1.5_10yrs',
'baseline_v1.5_10yrs']
self.nicknames['greedy'] = ['greedy', 'baseline']
self.family_baseline['greedy'] = 'baseline_v1.5_10yrs'
self.family_version['greedy'] = '1.5'
self.comment['u60'] = 'u60 = simulation extending u band visits to 60s but not doubling the number ' \
'of visits. This was pretty disastrous for transients. Replaced by u_long.'
self.family['u60'] = ['u60_v1.5_10yrs',
'baseline_v1.5_10yrs']
self.nicknames['u60'] = ['u60 fewer visits', 'baseline']
self.family_baseline['u60'] = 'baseline_v1.5_10yrs'
self.family_version['u60'] = '1.5'
"""
def read_summary_csv(self, csv_file='all_summaries_2021_02_08.csv'):
"""Read the summary stat csv file from disk.
This file can be downloaded from:
https://epyc.astro.washington.edu/~lynnej/opsim_downloads/all_summaries_2021_02_08.csv
"""
# Read in the CSV file containing the summary stat information
self.summaries = pd.read_csv(csv_file, index_col=0)
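# Illustrative usage (comment-only sketch; `families` stands in for an instance
# of this class, whose name is defined earlier in the file):
#   families.read_summary_csv()    # populates self.summaries from the CSV
#   families.list_of_families()    # prints each family and its simulation count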
def list_of_families(self):
"""Print a list of the simulation groups under consideration, as of this time. """
# The families
total = 0
displaystring = ''
family_list = []
for k in self.family:
if k == 'version_baselines':
continue
family_list.append(k)
displaystring+= f"**{k}**, with {len(self.family[k])} simulations.<br>"
total += len(self.family[k])
display_markdown(displaystring, raw=True)
print(f'For {total} simulations in all.')
return family_list
def family_info(self, f, normalized=False):
"""Print some summary information about the family and return a high-level set of metrics."""
d = pd.DataFrame(self.summaries[tablemetrics].loc[self.family[f]])
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
from collections import Counter, OrderedDict
def plot_active_users(df):
dic = df.to_dict()
k = list(dic.keys())
v = list(dic.values())
out_df = pd.DataFrame({'User': k, 'Messages': v}).head(7)
fig = px.bar(out_df, x='User', y='Messages')
return fig
def plot_active_hours(df):
# one zero-initialised bucket per hour of the day, so missing hours still appear
hours_dict = {f'{h:02d}': 0 for h in range(24)}
hours = Counter(df.to_dict())
# Counter.update() adds counts, so the zero entries only fill in the missing hours
hours.update(hours_dict)
k = list(hours.keys())
v = list(hours.values())
out_df = pd.DataFrame({'Time': k, 'Messages': v}).sort_values(by=['Time'])
fig = px.area(out_df, x='Time', y='Messages')
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=False)
return fig
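# Illustrative usage sketch (not part of the original module; the sample data
# below is made up). Both helpers appear to expect a label -> message-count
# mapping such as a pandas Series:
#   sample_users = pd.Series({'alice': 42, 'bob': 17, 'carol': 5})
#   fig_users = plot_active_users(sample_users)
#   sample_hours = pd.Series({'09': 3, '18': 12, '23': 7})
#   fig_hours = plot_active_hours(sample_hours)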
def plot_active_month_year(df):
months = {
'01': 'January',
'02': 'February',
'03': 'March',
'04': 'April',
'05': 'May',
'06': 'June',
'07': 'July',
'08': 'August',
'09': 'September',
'10': 'October',
'11': 'November',
'12': 'December'
}
dates = df['date'].values
new_dates = []
for date in dates:
splitted = date.split('.')
month = months[splitted[1]]
year = splitted[2]
new_dates.append(f'{month}/{year}')
dates_counter = Counter(new_dates)
k = list(dates_counter.keys())
v = list(dates_counter.values())
out_df = pd.DataFrame({'Date': k, 'Messages': v})
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923:
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where a plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA 923 AER fuel type strings into the same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER).Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into the fuel groups reported in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary mapping aggregated region names (keys) to lists of the
EPA IPM regions they comprise (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the PUDL glue tables that link EIA and FERC entities.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping
partition names (sub-keys) to the partitions themselves (sub-values), such as
the tuples of years that can currently be ingested into PUDL for each data
source.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary mapping table names (keys) to tuples of integer-type
columns (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
        'turbine', 'combustion', 'combined cycle',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # TODO: confirm whether this is still used; removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': | pd.StringDtype() | pandas.StringDtype |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import scipy.ndimage
import skimage.morphology
import sklearn.mixture
class HDoG_CPU(object):
def __init__(self, width=2560, height=2160, depth=None, sigma_xy=(4.0, 6.0), sigma_z=(1.8,2.7),
radius_small=(24,3), radius_large=(100,5), min_intensity=1000, gamma=1.0):
self.width = width
self.height = height
self.depth = depth
if type(sigma_xy) in [float,int]:
self.sigma_xy = (sigma_xy, sigma_xy*1.5)
else:
self.sigma_xy = sigma_xy
if type(sigma_z) in [float,int]:
self.sigma_z = (sigma_z, sigma_z*1.5)
else:
self.sigma_z = sigma_z
if not radius_small:
self.radius_small_xy = int(self.sigma_xy[1]*4)
self.radius_small_z = int(self.sigma_z[1]*2)
else:
self.radius_small_xy = radius_small[0]
self.radius_small_z = radius_small[1]
self.size_small = (self.radius_small_z*2+1, self.radius_small_xy*2+1, self.radius_small_xy*2+1)
if not radius_large:
self.radius_large_xy = int(self.sigma_xy[1]*30)
            self.radius_large_z = int(self.sigma_z[1]*10)
else:
self.radius_large_xy = radius_large[0]
self.radius_large_z = radius_large[1]
self.size_large = (self.radius_large_z*2+1, self.radius_large_xy*2+1, self.radius_large_xy*2+1)
self.min_intensity = min_intensity
self.gamma = gamma
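        # Gamma-normalized scale factor applied to the DoG response in
        # DoGFilter; presumably intended to make responses comparable across
        # the anisotropic xy/z scales.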
self.normalizer = (self.sigma_xy[0]**(gamma*2)) * (self.sigma_z[0]**gamma)
def load_images(self, list_images, dtype=np.uint16):
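        # Each path is assumed to point to a raw (headerless) dump of
        # height*width pixels with the given dtype, one z-slice per file.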
imgs = []
for path in list_images:
img = np.fromfile(path, dtype=dtype).reshape(self.height, self.width)
imgs.append(img)
imgs = np.array(imgs)
self.depth = imgs.shape[0]
return imgs
def Normalize(self, src_img):
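        # Local contrast normalization: divide each voxel's intensity by the
        # local dynamic range (smoothed grey dilation minus smoothed grey
        # erosion over the large neighborhood), masking out voxels below
        # min_intensity.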
dilation_l_img = scipy.ndimage.filters.uniform_filter(
scipy.ndimage.morphology.grey_dilation(src_img, size=self.size_large, mode="nearest").astype(np.float32),
size=self.size_large, mode="constant", cval=0)
erosion_l_img = scipy.ndimage.filters.uniform_filter(
scipy.ndimage.morphology.grey_erosion(src_img, size=self.size_large, mode="nearest").astype(np.float32),
size=self.size_large, mode="constant", cval=0)
intensity = src_img.astype(np.float32)
norm_img = (intensity >= self.min_intensity) * intensity / (dilation_l_img - erosion_l_img)
return norm_img
def DoGFilter(self, src_img):
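        # Difference-of-Gaussians band-pass: the narrow minus the wide Gaussian
        # enhances blob-like structures near the sigma_xy/sigma_z scale; the
        # result is rescaled by the gamma normalizer computed in __init__.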
temp1 = scipy.ndimage.filters.gaussian_filter(
src_img.astype(np.float32),
sigma=(self.sigma_z[0],self.sigma_xy[0],self.sigma_xy[0]),
truncate=2.0, mode="constant", cval=0)
temp2 = scipy.ndimage.filters.gaussian_filter(
src_img.astype(np.float32),
sigma=(self.sigma_z[1],self.sigma_xy[1],self.sigma_xy[1]),
truncate=2.0, mode="constant", cval=0)
dog_img = (temp1 - temp2) * self.normalizer
return dog_img
def HessianPDFilter(self, dog_img):
Hz,Hy,Hx = np.gradient(dog_img)
Hzz,Hyz,Hxz = np.gradient(Hz)
Hyz,Hyy,Hxy = np.gradient(Hy)
Hxz,Hxy,Hxx = np.gradient(Hx)
det_img = Hxx*Hyy*Hzz + 2*Hxy*Hyz*Hxz - Hxx*Hyz*Hyz - Hyy*Hxz*Hxz - Hzz*Hxy*Hxy
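        # Sylvester's criterion for a negative-definite 3x3 Hessian: Hxx < 0,
        # the 2x2 leading minor Hxx*Hyy - Hxy^2 > 0, and det(H) < 0. Voxels
        # passing this test are local intensity maxima (candidate blob centers).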
pd_img = np.bitwise_and(np.bitwise_and(Hxx < 0, Hxx*Hyy-Hxy*Hxy > 0), det_img < 0)
hessian_img = np.array([Hxx,Hxy,Hxz,Hyy,Hyz,Hzz])
return pd_img, hessian_img
def ScaleResponse(self, scale_img, pd_img):
response = np.sum(scale_img*pd_img) / np.sum(pd_img)
return response
def CCL(self, pd_img):
labels_img = skimage.morphology.label(pd_img)
return labels_img
def RegionalFeatures(self, norm_img, hessian_img, labels_img):
on_region = np.nonzero(labels_img)
labels_list = labels_img[on_region]
num_labels = np.max(labels_list)
# max intensity
max_normalized = scipy.ndimage.maximum(norm_img, labels=labels_img, index=range(1, num_labels+1))
# region size
ns = np.ones(len(labels_list))
region_size = np.bincount(labels_list-1, weights=ns)
# Regional Hessian Eigenvalues
HT = np.empty((6, num_labels))
for i in range(6):
HT[i] = np.bincount(labels_list-1, weights=hessian_img[i][on_region])
HT_mat = np.array([
[HT[0],HT[1],HT[2]],
[HT[1],HT[3],HT[4]],
[HT[2],HT[4],HT[5]]
]).T
eigenvals = np.linalg.eigvalsh(HT_mat)
l1,l2,l3 = eigenvals[:,0],eigenvals[:,1], eigenvals[:,2]
blobness = l3*l3 / (l1*l2) #l3/np.sqrt(l1*l2)
structureness = l1*l1 + l2*l2 + l3*l3 #np.sqrt()
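        # Eigenvalue-based shape measures summed over each labeled region:
        # blobness is large for roughly spherical (cell-like) regions, while
        # structureness measures the overall second-derivative magnitude
        # (Frangi-style measures adapted to blobs).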
# following code is needed if the label is not relabeled as 1,2,3,...
#label_values = np.array(sorted(np.unique(labels))) # including background(0)
#mp = np.arange(0,np.max(label_values)+1)
#mp[label_values] = np.arange(label_values.shape[0])
#labels_new = mp[labels]
zgrid,ygrid,xgrid = np.mgrid[0:self.depth, 0:self.height, 0:self.width]
centroid_x = np.bincount(labels_img.flatten(), weights=xgrid.flatten())[1:] / region_size#np.bincount(labels_img.flatten())[1:]
centroid_y = np.bincount(labels_img.flatten(), weights=ygrid.flatten())[1:] / region_size#np.bincount(labels_img.flatten())[1:]
centroid_z = np.bincount(labels_img.flatten(), weights=zgrid.flatten())[1:] / region_size#np.bincount(labels_img.flatten())[1:]
df = pd.DataFrame({
"index": pd.Series(np.arange(num_labels)),
"intensity": pd.Series(max_normalized),
"size": | pd.Series(region_size) | pandas.Series |
import glob
import datetime
import os
import pandas as pd
import numpy as np
import re
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
# pyinstaller --onefile --noconsole --icon GetCSV.ico Arca_GetCSVConverter_2-0-0.py
#for MMW 18-6 spreadsheets
probCol = False
#infer desktop
desktopPath = os.path.expanduser("~/Desktop/")
filelist=['']
probRecords = []
probColls = []
#filename = r'arms_modsonly_May9.csv'
col_names = ["IslandoraContentModel","BCRDHSimpleObjectPID",'imageLink','filename','directory','childKey','title', 'alternativeTitle', 'creator1', 'creator2','creator3']
col_names += ['corporateCreator1','corporateCreator2','contributor1','contributor2','corporateContributor1','publisher_original','publisher_location']
col_names += ['dateCreated','description','extent','topicalSubject1','topicalSubject2','topicalSubject3','topicalSubject4','topicalSubject5']
col_names += ['geographicSubject1','coordinates','personalSubject1','personalSubject2','corporateSubject1','corporateSubject2', 'dateIssued_start']
col_names += ['dateIssued_end','dateRange', 'frequency','genre','genreAuthority','type','internetMediaType','language1','language2','notes']
col_names += ['accessIdentifier','localIdentifier','ISBN','classification','URI']
col_names += ['source','rights','creativeCommons_URI','rightsStatement_URI','relatedItem_title','relatedItem_PID','recordCreationDate','recordOrigin']
pattern1 = r'^[A-Z][a-z]{2}-\d{2}$' #%b-%y date (e.g. Jun-17)
pattern2 = r'^\d{2}-\d{2}-[1-2]\d{3}$'
contentModels = {
r"info:fedora/islandora:sp_large_image_cmodel": "Large Image",
r"info:fedora/islandora:sp_basic_image": "Basic Image",
r"info:fedora/islandora:bookCModel": "Book",
r"info:fedora/islandora:newspaperIssueCModel":"Newspaper - issue",
r"info:fedora/islandora:newspaperPageCModel":"Newspaper",
r"info:fedora/islandora:sp_PDF":"PDF",
r"info:fedora/islandora:sp-audioCModel":"Audio",
r"info:fedora/islandora:sp_videoCModel":"Video",
r"info:fedora/islandora:sp_compoundCModel":"Compound",
r"info:fedora/ir:citationCModel":"Citation"
}
def browse_button():
# Allow user to select a directory and store it in global var
# called folder_path1
lbl1['text'] = ""
csvname = filedialog.askopenfilename(initialdir = desktopPath,title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
if ".csv" not in csvname:
lbl1['text'] = "**Please choose a file with a .csv extension!"
else:
filelist[0] = csvname
lbl1['text'] = csvname
def splitMultiHdgs(hdgs):
if pd.notna(hdgs):
hdgs = hdgs.replace("\\,",";")
hdgs = hdgs.split(",")
newhdgs = []
for hdg in hdgs:
newhdg = hdg.replace(";", ",")
newhdgs.append(newhdg)
return newhdgs
else:
return None
def getMultiVals(item, string, df, pd):
hdgs = df.filter(like=string).columns
for hdg in hdgs:
vals = df.at[item.Index,hdg]
if pd.notna(vals):
vals = splitMultiHdgs(vals)
return vals
return None
def convert_date(dt_str, letter_date):
"""
Converts an invalid formatted date into a proper date for ARCA Mods
Correct format: Y-m-d
Fixes:
Incorrect format: m-d-Y
Incorrect format (letter date): m-d e.g. Jun-17
:param dt_str: the date string
:param letter_date: whether the string is a letter date. Letter date is something like Jun-17
:return: the correctly formatted date
"""
if letter_date:
rev_date = datetime.datetime.strptime(dt_str, '%b-%y').strftime('%Y-%m') # convert date to yymm string format
rev_date_pts = rev_date.split("-")
year_num = int(rev_date_pts[0])
if year_num > 1999:
year_num = year_num - 100
year_str = str(year_num)
rev_date_pts[0] = year_str
revised = "-".join(rev_date_pts)
else:
revised = datetime.datetime.strptime(dt_str, '%d-%m-%Y').strftime(
            '%Y-%m-%d')  # convert date to Y-m-d string format
return revised
def sortValues(lst):
    # Remove NaNs first; mutating a list while iterating over it can skip items.
    lst = [item for item in lst if pd.notna(item)]
lst = set(lst)
lst = list(lst)
return lst
def dropNullCols(df):
nullcols = []
for col in df.columns:
notNull = df[col].notna().sum()
if notNull < 1:
nullcols.append(col)
return nullcols
def convert():
probCol = False
df2 = pd.DataFrame(columns = col_names)
df2.append( | pd.Series() | pandas.Series |
import gzip
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
# from sklearn.model_selection import train_test_split
from collections import Counter
import csv
import tensorflow as tf
import os.path
# from os import listdir
from tensorflow import keras
import os
import re
import spacy
import time
import math
import xml.etree.ElementTree as ET
import codecs
from collections import defaultdict
def pause():
int(input("enter a num to cont..."))
def clean(path, dataset=None):
filename = 'reviews_clean.json'
with open(filename, 'w') as f:
for line in open(path):
if dataset == 'meta':
line = line.replace('\\r', ' ')
elif dataset == 'amazon':
line = line.replace('\000','')
f.write(line)
return filename
def parse(path):
# g = gzip.open(path, 'rb')
g = open(path, 'r')
for l in g:
# yield eval(l)
yield json.loads(l) # deal with null
def getDF(path):
i = 0
df = {}
for d in parse(path):
df[i] = d
i += 1
return pd.DataFrame.from_dict(df, orient='index')
def get_child(child, df, nlp, f):
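    # Recursively follow reply ids from `children_ids`, writing each comment's
    # text one sentence per line so a thread is emitted as contiguous lines.
    # Note that the return inside the loop means only the first child id at
    # each level is descended into.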
if isinstance(child, float):
f.write('\n')
return
for c in child.split():
if c in df.keys():
line = str(df[c]['clean_text'])
doc = nlp(line)
for sent in doc.sents:
f.write(str(sent).rstrip())
f.write('\n')
df[c]['visited'] = True
return get_child(df[c]['children_ids'],
df, nlp, f)
def read_tsv(input_file):
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t")
lines = []
for line in reader:
lines.append(line)
return lines
def train_test_split(counts, pct_train=0.7, pct_dev=0.2, pct_test=0.1):
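    # Greedily assign whole groups (e.g. titles or product ids) to train/dev/
    # test so that no group is split across sets, approximately matching the
    # requested fractions.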
total = sum([v for _, v in counts.items()])
n_train = int(total * pct_train)
n_dev = int(total * pct_dev)
# n_test = int(total * pct_test)
train = []
dev = []
test = []
current_train = 0
current_dev = 0
for k, v in counts.items():
if current_train + v <= n_train:
train.append(k)
current_train += v
elif current_dev + v <= n_dev:
dev.append(k)
current_dev += v
else:
test.append(k)
return train, dev, test
def over_sample(df, col_name):
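    # Balance the classes by duplicating the minority class: `seed` is the
    # integer majority/minority ratio, and the minority rows are appended
    # seed-1 times before reshuffling.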
labels, values = zip(*Counter(df[col_name].values).items())
seed = int(np.max(values) / np.min(values))
oversample = df.loc[df[col_name] == labels[values.index(np.min(values))]]
for i in range(seed - 1):
df = df.append(oversample)
# shuffle
df = df.sample(frac=1)
return df
def read_meta(argv=None):
review_clean = clean('/home/ydu/BERT/DATA/metacritic/reviews.json', 'meta')
df = getDF(review_clean)
df = df[['title','text','score']]
df.score = df.score.astype(int)
df['senti'] = -1
    df.loc[df.score >= 7, 'senti'] = 1
    df.loc[df.score <= 4, 'senti'] = 0
df = df.loc[df['senti'] != -1]
df = df.drop(columns=['score'])
counts = dict(Counter(df.title.values))
train_labels, dev_labels, test_labels = train_test_split(counts)
# oversample in training
train = df.loc[df['title'].isin(train_labels)]
train = over_sample(train, 'senti')
# oversample in dev
dev = df.loc[df['title'].isin(dev_labels)]
dev = over_sample(dev, 'senti')
test = df.loc[df['title'].isin(test_labels)]
train = train.drop(columns=['title'])
dev = dev.drop(columns=['title'])
test = test.drop(columns=['title'])
df = df.drop(columns=['title'])
train.to_csv('/home/ydu/BERT/DATA/metacritic/train.tsv', index=False, sep='\t')
dev.to_csv('/home/ydu/BERT/DATA/metacritic/dev.tsv', index=False, sep='\t')
test.to_csv('/home/ydu/BERT/DATA/metacritic/test.tsv', index=False, sep='\t')
df.to_csv('/home/ydu/BERT/DATA/metacritic/all.tsv', index=False, sep='\t')
def read_reddit(argv=None):
df = pd.read_csv('/home/ydu/BERT/DATA/reddit/posts_with_ids.csv')
df = df.dropna(subset=['text'])
df['clean_text'] = df['text'].apply(lambda x: ' '.join(x.split()))
df['visited'] = False
df.set_index('post_id', inplace=True)
df = df.drop(columns=['text'])
df = df.to_dict(orient='index')
nlp = spacy.load('en_core_web_sm')
start_time = time.time()
f = open('pretrain_data/txt/pretrain_texttree.txt', 'w')
for k, _ in df.items():
if not df[k]['visited']:
doc = nlp(str(df[k]['clean_text']))
for sent in doc.sents:
f.write(str(sent).rstrip())
f.write('\n')
df[k]['visited'] = True
get_child(df[k]['children_ids'], df, nlp, f)
f.close()
print("--- %s sec ---" % (time.time() - start_time))
def read_amazon(argv=None):
review_clean = clean('/home/ydu/BERT/DATA/amazon/aggressive_dedup_video_games.json', 'amazon')
df = getDF(review_clean)
df = df[['asin','reviewText','overall']]
df.overall = df.overall.astype(int)
df['senti'] = -1
    df.loc[df.overall <= 2, 'senti'] = 0
    df.loc[df.overall >= 4, 'senti'] = 1
df = df.loc[df['senti'] != -1]
df = df.drop(columns=['overall'])
df = df.rename(columns={"reviewText": "text"})
counts = dict(Counter(df.asin.values))
train_labels, dev_labels, test_labels = train_test_split(counts)
# oversample in training
train = df.loc[df['asin'].isin(train_labels)]
train = over_sample(train, 'senti')
# oversample in dev
dev = df.loc[df['asin'].isin(dev_labels)]
dev = over_sample(dev, 'senti')
test = df.loc[df['asin'].isin(test_labels)]
train = train.drop(columns=['asin'])
dev = dev.drop(columns=['asin'])
test = test.drop(columns=['asin'])
df = df.drop(columns=['asin'])
train.to_csv('/home/ydu/BERT/DATA/amazon/train.tsv', index=False, sep='\t')
dev.to_csv('/home/ydu/BERT/DATA/amazon/dev.tsv', index=False, sep='\t')
test.to_csv('/home/ydu/BERT/DATA/amazon/test.tsv', index=False, sep='\t')
df.to_csv('/home/ydu/BERT/DATA/amazon/all.tsv', index=False, sep='\t')
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
data = {}
data["text"] = []
for file_path in os.listdir(directory):
with tf.gfile.GFile(os.path.join(directory, file_path), "r") as f:
data["text"].append(f.read())
return | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
import streamlit as st
import plotly_express as px
import pandas as pd
from plotnine import *
from plotly.tools import mpl_to_plotly as ggplotly
import numpy as np
import math
import scipy.stats as ss
from scipy.stats import *
def app():
# add a select widget to the side bar
st.sidebar.subheader("Discrete Probaility")
prob_choice = st.sidebar.radio("",["Discrete Probability","Binomial Probability","Geometric Probability","Poisson Probability"])
st.markdown('Discrete Probability')
if prob_choice == "Discrete Probability":
top = st.columns((1,1,2))
bottom = st.columns((1,1))
with top[0]:
#st.subheader("Discrete Probaility")
gs_URL = st.session_state.gs_URL
googleSheetId = gs_URL.split("spreadsheets/d/")[1].split("/edit")[0]
worksheetName = st.text_input("Sheet Name:","Discrete")
URL = f'https://docs.google.com/spreadsheets/d/{googleSheetId}/gviz/tq?tqx=out:csv&sheet={worksheetName}'
if st.button('Refresh'):
df = pd.read_csv(URL)
df = df.dropna(axis=1, how="all")
df = pd.read_csv(URL)
df = df.dropna(axis=1, how="all")
with bottom[0]:
st.dataframe(df)
global numeric_columns
global non_numeric_columns
try:
numeric_columns = list(df.select_dtypes(['float', 'int']).columns)
non_numeric_columns = list(df.select_dtypes(['object']).columns)
except Exception as e:
print(e)
st.write("Please upload file to the application.")
with top[1]:
x_axis = st.selectbox('X-Axis', options=numeric_columns, index=0)
prob = st.selectbox('Probabilities', options=numeric_columns, index = 1)
cat = 0
if len(non_numeric_columns) >= 1:
cat = 1
#cv = st.selectbox("Group", options=list(df[non_numeric_columns[0]].unique()))
if cat == 0:
x = df[x_axis]
p_x = df[prob]
m = sum(x*p_x)
sd = math.sqrt(sum((x-m)**2*p_x))
data = pd.DataFrame({"Mean":m,"Std Dev":sd},index = [0])
with top[2]:
dph = ggplot(df) + geom_bar(aes(x=df[df.columns[0]],weight=df[df.columns[1]]),color="darkblue", fill="lightblue")
st.pyplot(ggplot.draw(dph))
with bottom[1]:
st.write(data)
if cat != 0:
with bottom[1]:
data = pd.DataFrame(columns = ['Type','Mean','Standard Deviation'])
drow = 0
for type in list(df[non_numeric_columns[0]].unique()):
df1 = df[df[non_numeric_columns[0]]==type]
x = df1[x_axis]
p_x = df1[prob]
data.loc[drow,'Type'] = type
m = sum(x*p_x)
data.loc[drow,'Mean'] = m
data.loc[drow,'Standard Deviation'] = math.sqrt(sum((x-m)**2*p_x))
                    drow += 1
st.dataframe(data)
with top[2]:
dph = ggplot(df) + geom_bar(aes(x=df[x_axis],weight=df[prob],fill=non_numeric_columns[0],color=non_numeric_columns[0]),position= "identity", alpha = .4)
st.pyplot(ggplot.draw(dph))
if prob_choice == "Binomial Probability":
top = st.columns(2)
with top[0]:
st.subheader("Binomial Probability")
bip, bit, bih = st.text_input("Hit Probability:",.2),st.text_input("Tries:",8),st.text_input("Hits:",0)
bit = int(bit)
bip = float(bip)
biah = np.r_[0:bit+1]
cdf = binom.cdf(biah,bit,bip)
pmf = binom.pmf(biah,bit,bip)
biah = pd.DataFrame(biah)
cdf = pd.DataFrame(cdf)
pmf = pd.DataFrame(pmf)
bm,bv = binom.stats(bit,bip)
bdf = pd.concat([biah,pmf,cdf],axis=1)
bdf.columns = ["Hits","PDF","CDF"]
with top[1]:
st.write(bdf)
data = pd.DataFrame({"Mean":bm,"Std Dev":math.sqrt(bv)},index = [0])
st.write(data)
with top[0]:
bph = ggplot(bdf) + geom_bar(aes(x=bdf["Hits"],weight=bdf["PDF"]),color="darkblue", fill="lightblue")
st.pyplot(ggplot.draw(bph))
if prob_choice == "Geometric Probability":
again = st.columns(2)
with again[0]:
st.subheader("Geometric Probability")
gip, gih = st.text_input("Hit Probability:",.2,key ="1"),st.text_input("Tries:",4,key="2")
gip = float(gip)
gih = int(gih)
giah = np.r_[0:gih+6]
cdf = geom.cdf(giah,gip)
pmf = geom.pmf(giah,gip)
giah = pd.DataFrame(giah)
cdf = pd.DataFrame(cdf)
pmf = pd.DataFrame(pmf)
gm,gv = geom.stats(gip)
gdf = | pd.concat([giah,pmf,cdf],axis=1) | pandas.concat |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(axis=1), self.expected_out.sort_index(axis=1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that the previous-quarter loader returns the expected single-day
output for a dataset with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that the next-quarter loader returns the expected single-day
output for a dataset with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
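# Minimal single-row events frame (sid 0, every other field left as NaN) that is
# reused by the loader-misconfiguration tests below.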
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
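# Keyword-argument names accepted by the split-adjusted estimates loaders.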
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
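# (e.g. the base sids 0, 10, 20 expand to every integer sid from 0 through 20).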
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7, cls.window_test_start_date),
(20, 121 * 0.7, pd.Timestamp("2015-01-07")),
(30, 230 * 11, cls.window_test_start_date),
(40, 240, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5 * 6, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7 * 0.8, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-07")),
(30, 230 * 11 * 12, cls.window_test_start_date),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-21"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-22"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-09"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 1 / 4, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-12"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-13", "2015-01-14")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
test_start_date = pd.Timestamp("2015-01-06", tz="utc")
test_end_date = pd.Timestamp("2015-01-12", tz="utc")
split_adjusted_asof = pd.Timestamp("2015-01-08")
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
],
"estimate1": [1100.0, 1200.0],
"estimate2": [2100.0, 2200.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-08"),
#!/usr/bin/env python3
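# Post-processing for adaptive-streaming playback logs: each JSON log is parsed
# into a (RunConfig, RunInfo) pair keyed by scheduler and FEC configuration, and
# the aggregated interruption/bitrate statistics are printed and rendered as
# seaborn plots saved to "vis-*" image files.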
import json
import math
import sys
import glob
import argparse
import os
from collections import namedtuple, defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
import pandas
RunConfig = namedtuple("RunConfig", "scheduler fec")
RunInfo = namedtuple("RunInfo", "count total durations interrupted_segments interrupt_times bitrates segment_bitrates segment_download_times segment_filenames initial_buffering")
PALETTE_5 = sns.color_palette("muted")
PALETTE_9 = sns.color_palette("muted")
PALETTE_9[4:9] = PALETTE_9[:5]
class FIGSIZE():
BOX_M = (5, 5)
WIDE_M = (12, 5)
WIDE_L = (15, 8)
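# Small descriptive-statistics helpers. get_stddev() uses the sample (n - 1)
# variant; get_median() returns the upper middle element for even-length lists.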
def get_mean(l):
return sum(l) / len(l)
def get_stddev(l):
mean = get_mean(l)
return math.sqrt(sum([(x - mean)**2 for x in l]) / (len(l) - 1))
def get_median(l):
return sorted(l)[len(l) // 2]
def get_z_score(x, mean, stddev):
return abs((x - mean) / stddev)
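# Keep only values whose z-score is below `cutoff` standard deviations from the mean.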
def z_filter(l, cutoff = 2.5):
mean = get_mean(l)
stddev = get_stddev(l)
return list(filter(lambda x: get_z_score(x, mean, stddev) < cutoff, l))
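# Normalize configuration names for display (IOD -> R-IOD, XOR4-1 -> XOR 4,
# XOR16-1 -> XOR 16, LL -> LowRTT).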
def fixname(name):
name = name[:3].replace("IOD", "R-IOD") + name[3:]
name = name.replace("XOR4-1", "XOR 4")
name = name.replace("XOR16-1", "XOR 16")
return name.replace("LL", "LowRTT")
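# One-line summary (mean/median/stddev/min/max/sum) used by print_stats().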
def get_population_stats(p):
return ", ".join([
f"mean: {round(get_mean(p), 2)}",
f"median: {round(get_median(p), 2)}",
f"stddev: {round(get_stddev(p), 2)}",
f"min: {round(min(p), 2)}",
f"max: {round(max(p), 2)}",
f"sum: {round(sum(p), 2)}",
])
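# Parse one playback log (JSON) into a (RunConfig, RunInfo) pair. Interruptions
# during the first `slow_start_duration` seconds, glitches shorter than 0.1 ms,
# and stalls longer than 10 s (treated as server crashes) are excluded.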
def read_log(filename, slow_start_duration = 15):
with open(filename, 'rb') as fo:
log = json.load(fo)
conf = RunConfig(log['scheduler'], log['fecConfig'])
total = 0.0
start_time = log['playback_info']['start_time']
initial_buffering = float(log['playback_info']['initial_buffering_duration'])
count = 0
durations = []
interrupted_segments = []
interrupt_times = []
for event in log['playback_info']['interruptions']['events']:
seg_no = event['segment_number']
start = event['timeframe'][0]
end = event['timeframe'][1]
duration = end - start
if start < start_time + slow_start_duration:
# ignore first few seconds of stream
continue
# some interruptions are really short, ignore?
if duration < 1e-4:
continue
# some, on the other hand, are unrealistically long. this points
# towards a crash in the server and can be ignored
if duration > 10:
continue
count += 1
durations.append(duration)
total += duration
interrupted_segments.append(seg_no)
interrupt_times.append({
"start": start - start_time,
"end": end - start_time,
"duration": duration,
})
segment_filenames = [x[0] for x in log['segment_info']]
segment_bitrates = [int(x[1]) for x in log['segment_info']]
segment_download_times = [float(x[3]) for x in log['segment_info']]
bitrates = set(segment_bitrates)
return conf, RunInfo(count, total, durations, interrupted_segments,
interrupt_times, bitrates, segment_bitrates,
segment_download_times, segment_filenames, initial_buffering)
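# Print per-configuration population statistics: run count, interruption counts,
# total stall time, observed bitrates, and up/down bitrate switches.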
def print_stats(allInfos):
for conf, infos in allInfos.items():
print(f"=== {conf.scheduler}, {conf.fec} ===")
print("> population size")
print(f" {len(infos)}")
print("> count")
counts = [x.count for x in infos]
print(f" {get_population_stats(counts)}")
print("> total")
totals = [x.total for x in infos]
print(f" {get_population_stats(totals)}")
print("> bitrates")
bitrates = []
for info in infos:
bitrates += info.segment_bitrates
print(f" {get_population_stats(bitrates)}")
print("> bitrate switching (up)")
bitrate_up = []
for info in infos:
count = 0
for prev, current in zip(info.segment_bitrates[:-1], info.segment_bitrates[1:]):
if prev < current:
count += 1
bitrate_up.append(count)
print(f" {get_population_stats(bitrate_up)}")
print("> bitrate switching (down)")
bitrate_down = []
for info in infos:
count = 0
for prev, current in zip(info.segment_bitrates[:-1], info.segment_bitrates[1:]):
if prev > current:
count += 1
bitrate_down.append(count)
print(f" {get_population_stats(bitrate_down)}")
def visualize_boxplot(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = z_filter([x.count for x in infos])
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.set(ylim=(0, None))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig("vis-boxplot." + FORMAT)
def visualize_boxplot_split(allInfos):
data_a = {}
data_b = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data_a[key] = []
data_b[key] = []
for info in infos:
count_a = 0
count_b = 0
for interrupt_time in info.interrupt_times:
if interrupt_time["start"] < 100:
count_a+=1
else:
count_b+=1
data_a[key].append(count_a)
data_b[key].append(count_b)
# fill missing recordings with NaNs
maxlen_a = max([len(data_a[k]) for k in data_a.keys()])
maxlen_b = max([len(data_b[k]) for k in data_b.keys()])
for k, v in data_a.items():
data_a[k] = v + [float('nan')] * (maxlen_a - (len(v)))
for k, v in data_b.items():
data_b[k] = v + [float('nan')] * (maxlen_b - (len(v)))
# draw A
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_a)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot-split-a." + FORMAT)
# draw B
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_b)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot-split-b." + FORMAT)
def visualize_distplot_interrupts(allInfos):
plt.figure(figsize=(6,5))
sns.set(style="whitegrid", palette=PALETTE_9)
data = {
"config": [],
"interrupted_segments": [],
}
configs = set()
segments_count = 0
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
configs.add(key)
for info in infos:
data["config"].extend([key]*len(info.interrupted_segments))
data["interrupted_segments"].extend(info.interrupted_segments)
segments_count = max(segments_count, len(info.segment_bitrates))
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = plt.gca()
pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
g = sns.FacetGrid(df, row="config", hue="config", aspect=10, height=1, palette=pal)
g.map(sns.kdeplot, "interrupted_segments", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2, clip=(0, segments_count))
##g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Set the subplots to overlap
#g.fig.subplots_adjust(hspace=-.25)
#g.set_titles("")
g.set(yticks=[], xlabel='Segments')
g.despine(bottom=True, left=True, right=True)
plt.savefig("vis-dist-interrupts." + FORMAT)
def visualize_distplot_interrupts_cumulative(allInfos):
plt.figure(figsize=(10,6))
sns.set(style="ticks", palette=PALETTE_5)
data = {}
configs = set()
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
configs.add(key)
data[key] = [x.count for x in infos]
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
kwargs = {"cumulative": True}
patches = []
for i, config in enumerate(configs):
ax = sns.distplot(data[config], hist=False, kde_kws=kwargs)
patches.append(mpatches.Patch(
color=sns.color_palette()[i],
label=config
))
ax.set(xlabel='# Interruptions', ylabel='')
plt.legend(handles=patches)
plt.savefig("vis-dist-amount-cumulative." + FORMAT)
def visualize_boxplot_accumulated(allInfos, split=False):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = [x.total for x in infos]
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='Accumulated Interruption Duration (s)')
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot2." + FORMAT)
def visualize_boxplot_mean(allInfos, split=False):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette="pastel")
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = []
for x in infos:
data[key] += x.durations
data[key] = z_filter(data[key])
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = sns.boxplot(palette=PALETTE_9, data=df, showfliers=False)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='Interruption Duration (s)')
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot3." + FORMAT)
def visualize_boxplot_mean_split(allInfos, split=False):
data_a = {}
data_b = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data_a[key] = []
data_b[key] = []
for info in infos:
durations_a = []
durations_b = []
for interrupt_time in info.interrupt_times:
if interrupt_time["start"] < 100:
durations_a.append(interrupt_time["duration"])
else:
durations_b.append(interrupt_time["duration"])
data_a[key].extend(durations_a)
data_b[key].extend(durations_b)
# fill missing recordings with NaNs
maxlen_a = max([len(data_a[k]) for k in data_a.keys()])
maxlen_b = max([len(data_b[k]) for k in data_b.keys()])
for k, v in data_a.items():
data_a[k] = v + [float('nan')] * (maxlen_a - (len(v)))
for k, v in data_b.items():
data_b[k] = v + [float('nan')] * (maxlen_b - (len(v)))
# draw A
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_a)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig("vis-boxplot3-split-a." + FORMAT)
# draw B
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_b)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig("vis-boxplot3-split-b." + FORMAT)
def visualize_distplot_duration(allInfos):
plt.figure(figsize=(10,10))
sns.set(style="ticks", palette="pastel")
data = {
"config": [],
"duration": [],
}
configs = set()
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
configs.add(key)
for info in infos:
for duration in info.durations:
data["config"].append(key)
data["duration"].append(duration)
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = plt.gca()
pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
g = sns.FacetGrid(df, row="config", hue="config", aspect=10, height=2, palette=pal)
g.map(sns.kdeplot, "duration", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2, clip=(0, 0.2))
g.map(sns.kdeplot, "duration", clip_on=False, color="w", lw=2, bw=.2, clip=(0, 0.2))
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Set the subplots to overlap
#g.fig.subplots_adjust(hspace=-.25)
g.despine(bottom=True, left=True)
ax.set(xlabel='', ylabel='Interruption Duration (s)')
ax.set(ylim=(0, None))
plt.savefig("vis-dist-duration." + FORMAT)
def visualize_boxplot_initial_buffering(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = []
for x in infos:
data[key].append(x.initial_buffering)
data[key] = z_filter(data[key])
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='Initial Buffering Delay (s)')
plt.savefig("vis-boxplot4." + FORMAT)
def visualize_boxplot_segments_until_highest_bitrate(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="ticks", palette="pastel")
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = []
for x in infos:
segments = 0
for bitrate in x.segment_bitrates:
if bitrate < 10000000:
segments += 1
else:
break
data[key].append(segments)
df = pandas.DataFrame.from_dict(data)
ax = sns.violinplot(palette=PALETTE_9, data=df, cut=0)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Segments until highest bitrate')
plt.savefig("vis-boxplot5." + FORMAT)
def visualize_timeseries(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="ticks", palette="pastel")
data = {
"segment": [],
"accumulated": [],
"config": [],
}
max_segment_number = 0
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
run_max_seg_no = 0
for info in infos:
if len(info.interrupted_segments) > 0 and max(info.interrupted_segments) > run_max_seg_no:
run_max_seg_no = max(info.interrupted_segments)
if run_max_seg_no > max_segment_number:
max_segment_number = run_max_seg_no
for run in infos:
accum = 0
for segment in range(run_max_seg_no + 1):
if segment in run.interrupted_segments:
accum += 1
data["segment"].append(segment)
data["accumulated"].append(accum)
data["config"].append(key)
df = pandas.DataFrame.from_dict(data)
ax = sns.lineplot(data=df, x="segment", y="accumulated", hue="config")
ax.set(xlabel='Segment Number', ylabel='Accumulated Interruptions')
#plt.yticks(range(1, max_interruptions + 1))
plt.savefig("vis-interrupt-distribution." + FORMAT)
def visualize_timeseries_duration(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="ticks", palette="pastel")
data = {
"segment": [],
"accumulated": [],
"config": [],
}
max_segment_number = 0
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
run_max_seg_no = 0
for info in infos:
if len(info.interrupted_segments) > 0 and max(info.interrupted_segments) > run_max_seg_no:
run_max_seg_no = max(info.interrupted_segments)
if run_max_seg_no > max_segment_number:
max_segment_number = run_max_seg_no
for run in infos:
accum = 0
            durations = list(run.durations)  # copy so the run's own list is not mutated below
for segment in range(run_max_seg_no + 1):
if segment in run.interrupted_segments:
accum += durations[0]
durations.pop(0)
data["segment"].append(segment)
data["accumulated"].append(accum)
data["config"].append(key)
    df = pandas.DataFrame.from_dict(data)
# -*- coding: utf-8 -*-
"""Untitled0.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1uPsIhY5eetnUG-xeLtHmKvq5K0mIr6wW
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13]
y = dataset.iloc[:, 13]
geography = pd.get_dummies(X['Geography'],drop_first=True)
gender = pd.get_dummies(X['Gender'],drop_first=True)
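# Typical next steps for this kind of notebook (not part of the original file;
# column names match the dummies created above, everything else is an assumption):
# X = pd.concat([X.drop(['Geography', 'Gender'], axis=1), geography, gender], axis=1)
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)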
from ast import literal_eval
import numpy as np
import pandas as pd
import scipy
from pandas import DataFrame
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import BallTree, KDTree, NearestNeighbors
from sklearn.preprocessing import MultiLabelBinarizer, Normalizer
from tqdm import tqdm
def parse_json(filename_python_json: str, read_max: int = -1) -> DataFrame:
"""Parses json file into a DataFrame
Args:
filename_python_json (str): Path to json file
read_max (int, optional): Max amount of lines to read from json file. Defaults to -1.
Returns:
DataFrame: DataFrame from parsed json
"""
with open(filename_python_json, "r", encoding="utf-8") as f:
# parse json
parse_data = []
# tqdm is for showing progress bar, always good when processing large amounts of data
for line in tqdm(f):
# load python nested datastructure
parsed_result = literal_eval(line)
parse_data.append(parsed_result)
if read_max != -1 and len(parse_data) > read_max:
print(f"Break reading after {read_max} records")
break
print(f"Reading {len(parse_data)} rows.")
# create dataframe
df = DataFrame.from_dict(parse_data)
return df
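# Illustrative usage (the file path and read_max value are assumptions):
# items_df = parse_json("data/steam_games.json", read_max=10_000)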
# TODO: use seed for SVD
class ContentBasedRec(object):
def __init__(self, items_path: str, sparse: bool = True, distance_metric='minkowski', dim_red=None, tfidf='default', use_feedback=True, normalize=False) -> None:
"""Content based recommender
Args:
items_path (str): Path to json file containing the items
sparse (bool, optional): If recommender uses a sparse representation. Defaults to True.
distance_metric (str, optional): Which distance metric to use. Defaults to 'minkowski'.
dim_red ([type], optional): Which dimensionality reduction to use. Defaults to None.
tfidf (str, optional): Which tf-idf method to use. Defaults to 'default'.
use_feedback (bool, optional): Consider positive/negative reviews. Defaults to True.
"""
super().__init__()
self.sparse = sparse
self.dim_red = dim_red
self.use_feedback = use_feedback
self.normalize = normalize
self.items = self._generate_item_features(parse_json(items_path))
self.recommendations = None
self.normalizer = Normalizer(copy=False)
# Select tf-idf method to use
self.tfidf = None
if tfidf == 'default':
self.tfidf = TfidfTransformer(smooth_idf=False, sublinear_tf=False)
elif tfidf == 'smooth':
self.tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=False)
elif tfidf == 'sublinear':
self.tfidf = TfidfTransformer(smooth_idf=False, sublinear_tf=True)
elif tfidf == 'smooth_sublinear':
self.tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=True)
# Select algorithm to use for neighbour computation
algorithm = 'auto'
if distance_metric in BallTree.valid_metrics:
algorithm = 'ball_tree'
elif distance_metric in KDTree.valid_metrics:
algorithm = 'kd_tree'
self.method = NearestNeighbors(n_neighbors=10, algorithm=algorithm, metric=distance_metric)
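    # Illustrative construction (paths and options are assumptions):
    # rec = ContentBasedRec("data/steam_games.json", sparse=True, tfidf="smooth")
    # rec.generate_recommendations("data/australian_user_reviews.json", read_max=5000)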
def _generate_item_features(self, items: DataFrame) -> DataFrame:
"""Generates feature vector of items and appends to returned DataFrame
Args:
items (DataFrame): dataframe containing the items
Returns:
DataFrame: dataframe with feature vector appended
"""
items.drop(["publisher", "app_name", "title", "url", "release_date", "discount_price", "reviews_url",
"price", "early_access", "developer", "sentiment", "metascore", "specs"], axis=1, inplace=True)
items.dropna(subset=["id"], inplace=True)
items = items.reset_index(drop=True)
# Combine genres, tags and specs into one column
items["genres"] = items["genres"].fillna("").apply(set)
items["tags"] = items["tags"].fillna("").apply(set)
items["tags"] = items.apply(lambda x: list(
set.union(x["genres"], x["tags"])), axis=1)
items = items.drop(["genres"], axis=1)
# Compute one-hot encoded vector of tags
mlb = MultiLabelBinarizer(sparse_output=self.sparse)
if self.sparse:
items = items.join(DataFrame.sparse.from_spmatrix(mlb.fit_transform(items.pop(
"tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
else:
items = items.join(DataFrame(mlb.fit_transform(items.pop(
"tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
return items
def generate_recommendations(self, data_path: str, amount=10, read_max=None) -> None:
"""Generate recommendations based on user review data
Args:
data_path (str): User review data
amount (int, optional): Amount of times to recommend. Defaults to 10.
read_max (int, optional): Max amount of users to read. Defaults to None.
"""
items = self.items
df = parse_json(data_path) if read_max is None else parse_json(data_path, read_max=read_max)
df.drop(df[~df["reviews"].astype(bool)].index,inplace=True) # filter out empty reviews
# Process reviews
df = df.explode("reviews", ignore_index=True)
df = pd.concat([df.drop(["reviews", "user_url"], axis=1), pd.json_normalize(df.reviews)],
axis=1).drop(["funny", "helpful", "posted", "last_edited", "review"], axis=1)
df = df.groupby("user_id").agg(list).reset_index()
# Drop id so only feature vector is left
if self.sparse:
X = scipy.sparse.csr_matrix(items.drop(["id"], axis=1).values)
else:
X = np.array(items.drop(["id"], axis=1).values)
if self.tfidf:
# Use tf-idf
X = self.tfidf.fit_transform(X)
if self.dim_red:
# Use dimensionality reduction
X = self.dim_red.fit_transform(X)
if self.normalize:
X = self.normalizer.fit_transform(X)
# Combine transformed feature vector back into items
if self.sparse:
items = pd.concat([items["id"], DataFrame.sparse.from_spmatrix(X)], axis=1)
else:
            items = pd.concat([items["id"], DataFrame(X)], axis=1)
import pandas as pd
import instances.dinamizators.dinamizators as din
import math
def simplest_test():
'''
Test if the dinamizators are running
'''
df = (
pd.read_pickle('./instances/analysis/df_requests.zip')
.reset_index()
)
din.dinamize_as_berbeglia(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5,
60)
din.dinamize_as_pureza_laporte(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.pickup_lower_tw,
df.pickup_upper_tw,
0)
din.dinamize_as_pankratz(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5)
din.dinamize_as_fabri_recht(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_lower_tw,
df.delivery_upper_tw)
def test_calculate_travel_time():
pickup_location_x_coord = -1
pickup_location_y_coord = -1
delivery_location_x_coord = 1
delivery_location_y_coord = 1
expected_travel_time = math.ceil(math.sqrt(2) + math.sqrt(2))
calculated_travel_time = (
din.calculate_travel_time(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord)
)
assert (expected_travel_time == calculated_travel_time)
def test_series_elementwise_max():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_max = pd.Series([3, 2, 3])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all()
def test_dataframe_elementwise_max():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
    y = pd.DataFrame([[3, 2, 1], [1, 2, 3]])
import os, os.path, sys
if 'OORB_DATA' not in os.environ:
os.environ['OORB_DATA'] = '/Users/mjuric/projects/lsst_ssp/oorb-lynne/data'
extra_paths = [
'/Users/mjuric/projects/lsst_ssp/oorb-lynne/python',
]
for _p in extra_paths:
if not os.path.isdir(_p):
print(f"{_p} not present. Skipping.")
continue
if _p not in sys.path:
sys.path += [ _p ]
print(f"Added {_p}")
from . import api, utils
from pyoorb import pyoorb
from astropy.time import Time
from astropy.coordinates import SkyCoord
import numpy as np
import pandas as pd
__all__ = ["Orbits", "Propagator"]
timeScales = dict( UTC=1, UT1=2, TT=3, TAI=4 )
elemType = dict( CAR=1, COM=2, KEP=3, DEL=4, EQX=5 )
inv_timeScales = dict(zip(timeScales.values(), timeScales.keys()))
inv_elemType = dict(zip(elemType.values(), elemType.keys()))
def _to_pyoorb_representation(df):
"""Convert orbital elements into the numpy fortran-format array OpenOrb requires.
    The OpenOrb element format is a single array with elements:
0 : orbitId (cannot be a string)
1-6 : orbital elements, using radians for angles
7 : element 'type' code (1 = CAR, 2 = COM, 3 = KEP, 4 = DELauny, 5 = EQX (equinoctial))
8 : epoch
9 : timescale for epoch (1 = UTC, 2 = UT1, 3 = TT, 4 = TAI : always assumes TT)
10 : magHv
11 : g
    Returns the object ids and the orbit parameters in an array formatted for OpenOrb.
"""
oorbElem = np.empty( (len(df), 12), dtype=np.double, order='F')
oorbElem[:, 0] = np.arange(len(df))
if 'objId' in df:
id = df['objId'].values
else:
id = np.arange(len(df))
# Add the appropriate element and epoch types:
# Convert other elements INCLUDING converting inclination, node, argperi to RADIANS
if 'meanAnomaly' in df:
oorbElem[:, 1] = df['a']
oorbElem[:, 2] = df['e']
oorbElem[:, 3] = np.radians(df['inc'])
oorbElem[:, 4] = np.radians(df['Omega'])
oorbElem[:, 5] = np.radians(df['argPeri'])
oorbElem[:, 6] = np.radians(df['meanAnomaly'])
oorbElem[:, 7] = elemType['KEP']
elif 'argPeri' in df:
oorbElem[:, 1] = df['q']
oorbElem[:, 2] = df['e']
oorbElem[:, 3] = np.radians(df['inc'])
oorbElem[:, 4] = np.radians(df['Omega'])
oorbElem[:, 5] = np.radians(df['argPeri'])
oorbElem[:, 6] = df['tPeri']
oorbElem[:, 7] = elemType['COM']
elif 'x' in df:
oorbElem[:, 1] = df['x']
oorbElem[:, 2] = df['y']
oorbElem[:, 3] = df['z']
oorbElem[:, 4] = df['xdot']
oorbElem[:, 5] = df['ydot']
oorbElem[:, 6] = df['zdot']
oorbElem[:, 7] = elemType['CAR']
else:
        raise ValueError('Unsupported orbital elements: expected KEP (meanAnomaly), COM (argPeri/tPeri) or CAR (x/y/z) columns.')
oorbElem[:,8] = df['epoch']
oorbElem[:,9] = timeScales['TT']
    if 'H' in df and 'G' in df:
        oorbElem[:,10] = df['H']
        oorbElem[:,11] = df['G']
    else:
        oorbElem[:,10] = 0.0
        oorbElem[:,11] = 0.15
return id, oorbElem
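# Illustrative input for _to_pyoorb_representation (column layout assumed from the
# branches above; the values themselves are made up):
# df = pd.DataFrame({'objId': ['2010 AB'], 'a': [2.77], 'e': [0.08], 'inc': [10.6],
#                    'Omega': [80.5], 'argPeri': [73.6], 'meanAnomaly': [77.4],
#                    'epoch': [59200.0], 'H': [3.4], 'G': [0.15]})
# ids, oorb_elems = _to_pyoorb_representation(df)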
def _from_pyoorb_representation(id, oorbElem, element_type):
et = elemType[element_type]
# if oorbElem are not of the right element type, convert
if np.any(oorbElem[:, 7] != et):
_init_pyoorb()
oorbElem, err = pyoorb.oorb_element_transformation(in_orbits=oorbElem, in_element_type=et)
if err != 0:
raise RuntimeError('Oorb returned error code %s' % (err))
# convert timescales, if not in TT
epoch = oorbElem[:, 8]
if np.any(oorbElem[:, 9] != timeScales['TT']):
if np.any(oorbElem[:, 9] != oorbElem[0, 9]):
# this should never happen if only manipulating the states through the public interface
raise ValueError('Unsupported: mixed timescales in internal pyoorb array')
scale = inv_timeScales[oorbElem[0, 9]]
epoch = Time(epoch, format='mjd', scale=scale.lower()).tt
# convert to dataframe
    df = pd.DataFrame({'objId': id})
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
        new_trading_df = new_trading_df.dropna(axis='index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
        raise RuntimeError(
            "Caught exception while retrieving data from Yahoo...", ex)
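# Illustrative usage (runs inside the Flask app context because of db.engine;
# the ticker and date range are assumptions):
# df = get_ticker_from_yahoo('AAPL', datetime.datetime(2020, 1, 1), datetime.datetime.today())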
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
return the latest 2 days news healines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
            soup = BeautifulSoup(texts, 'html.parser')
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
        current_app.logger.info("Exception in scrape Finviz: %s", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
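# Illustrative usage (ticker is an assumption):
# news_df = get_news_from_finviz('AAPL')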
def prepare_trading_dataset(df):
"""Prepare the trading data set.
Time series analysis incoporate previous data for future prediction,
We need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
window: int, default = 400
feature engineer windows size. Using at most 400 trading days to construct
features.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
        raise RuntimeError(
            "Encountered error in >>make_dataset.prepare_trading_dataset<<: "
            "the trading DataFrame is empty.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
        df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
    g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153134343689,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_TO_PREV not in move_df
move_df.generate_dist_time_speed_features()
    assert_frame_equal(move_df, expected)
# License: Apache-2.0
from gators.feature_generation_str import StringContains
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.DataFrame(np.zeros((3, 3)), columns=list('qwe'))
X['a'] = ['0', '1Q', '1QQ']
X['s'] = ['0', 'W2', 'W2W']
X['d'] = ['0', 'Q', '']
obj = StringContains(
columns=list('asd'), contains_vec=['1', '2', '0']).fit(X)
columns_expected = [
'q', 'w', 'e', 'a', 's', 'd',
'a__contains_1', 's__contains_2', 'd__contains_0']
X_expected = pd.DataFrame(
[[0.0, 0.0, 0.0, '0', '0', '0', 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, '1Q', 'W2', 'Q', 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, '1QQ', 'W2W', '', 1.0, 1.0, 0.0]],
columns=columns_expected)
return obj, X, X_expected
@pytest.fixture
def data_ks():
X = ks.DataFrame(np.zeros((3, 3)), columns=list('qwe'))
X['a'] = ['0', '1Q', '1QQ']
X['s'] = ['0', 'W2', 'W2W']
X['d'] = ['0', 'Q', '']
obj = StringContains(
columns=list('asd'), contains_vec=['1', '2', '0']).fit(X)
columns_expected = [
'q', 'w', 'e', 'a', 's', 'd',
'a__contains_1', 's__contains_2', 'd__contains_0']
X_expected = pd.DataFrame(
[[0.0, 0.0, 0.0, '0', '0', '0', 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, '1Q', 'W2', 'Q', 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, '1QQ', 'W2W', '', 1.0, 1.0, 0.0]],
columns=columns_expected)
return obj, X, X_expected
@pytest.fixture
def data_with_names():
X = pd.DataFrame(np.zeros((3, 3)), columns=list('qwe'))
X['a'] = ['0', '1Q', '1QQ']
X['s'] = ['0', 'W2', 'W2W']
X['d'] = ['0', 'Q', '']
obj = StringContains(
columns=list('asd'),
contains_vec=['1', '2', '0'],
column_names=['a_with_1', 's_with_2', 'd_with_0']).fit(X)
columns_expected = [
'q', 'w', 'e', 'a', 's', 'd', 'a_with_1',
's_with_2', 'd_with_0']
X_expected = pd.DataFrame(
[[0.0, 0.0, 0.0, '0', '0', '0', 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, '1Q', 'W2', 'Q', 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, '1QQ', 'W2W', '', 1.0, 1.0, 0.0]],
columns=columns_expected)
return obj, X, X_expected
@pytest.fixture
def data_with_names_ks():
X = ks.DataFrame(np.zeros((3, 3)), columns=list('qwe'))
X['a'] = ['0', '1Q', '1QQ']
X['s'] = ['0', 'W2', 'W2W']
X['d'] = ['0', 'Q', '']
obj = StringContains(
columns=list('asd'),
contains_vec=['1', '2', '0'],
column_names=['a_with_1', 's_with_2', 'd_with_0']).fit(X)
columns_expected = [
'q', 'w', 'e', 'a', 's', 'd', 'a_with_1',
's_with_2', 'd_with_0']
X_expected = pd.DataFrame(
[[0.0, 0.0, 0.0, '0', '0', '0', 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, '1Q', 'W2', 'Q', 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, '1QQ', 'W2W', '', 1.0, 1.0, 0.0]],
columns=columns_expected)
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
    assert_frame_equal(X_new, X_expected)
"""Tasks to process Alpha Diversity results."""
from pandas import DataFrame
from app.extensions import celery
from app.display_modules.utils import persist_result_helper
from .models import AncestryResult
@celery.task()
def ancestry_reducer(samples):
"""Wrap collated samples as actual Result type."""
    framed_samples = DataFrame(samples)
# general utilities used throughout the project
import numpy as np
import pandas as pd
import requests
import const
# convert time string to season
def to_season(time):
datetime = pd.to_datetime(time)
return (datetime.month % 12 + 3) // 3 if datetime is not np.nan else np.nan
# normalize values of data-frame to [0, 1]
def normalize(data_frame, multiplier):
return multiplier * (data_frame - data_frame.min()) / (data_frame.max() - data_frame.min())
# convert float[s] to pretty
def pretty(value, decimal):
if isinstance(value, list):
return [("%0." + str(decimal) + "f") % y for y in value]
else:
return ("%0." + str(decimal) + "f") % value
def fill(series: pd.Series, max_interval=0, inplace=False):
"""
Replace NaN values with average of nearest non NaN neighbors
    :param max_interval: if the number of consecutive NaN values > max_interval, they are not filled
:param series:
:param inplace:
:return:
"""
filled = series if inplace else series.copy()
region = [-1, -1] # temporary interval of NaN values
last_item = len(filled) - 1
lbound = filled.index[0] # index bounds of the series
ubound = filled.index[last_item]
for index, value in filled.iteritems():
# Keep track of current interval of NaN values
if np.isnan(value):
if region[0] == -1:
region[0] = index
region[1] = index
# Replace NaN values with their boundary average
# when a NaN interval is started, and is ending with a non-NaN value or end of list
if region[0] != -1 and (not np.isnan(value) or index == ubound):
# do not fill NaN intervals wider than max_interval
if max_interval <= 0 or region[1] - region[0] + 1 <= max_interval:
start = region[0] - lbound # offset index to 0
end = region[1] - lbound # offset index to 0
first_value = filled.values[start - 1] if region[0] > lbound else np.nan
last_value = filled.values[end + 1] if region[1] < ubound else np.nan
# Duplicate one boundary to another if one does not exist
# this happens when a series starts or ends with a NaN
first_value = last_value if np.isnan(first_value) else first_value
last_value = first_value if np.isnan(last_value) else last_value
# Set NaN interval by monotonically moving from first boundary value to last
d = end - start + 1
for i in range(0, d):
filled.values[start + i] = ((d - i) * first_value + i * last_value) / d
# Reset NaN interval indicators
region[0] = region[1] = -1
return filled
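# Illustrative usage: bridge runs of at most 3 consecutive NaNs; the gap values
# are blended between the surrounding non-NaN neighbors.
# s = pd.Series([1.0, np.nan, np.nan, 4.0])
# filled = fill(s, max_interval=3)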
def SMAPE(forecast: pd.Series, actual: pd.Series):
"""
SMAPE error for predicted array compared to array of real values
:param forecast:
:param actual:
:return:
"""
if forecast.size != actual.size:
raise ValueError("length forecast {%s} <> {%s} actual" % (forecast.size, actual.size))
diff = np.abs(np.subtract(forecast, actual))
avg = (np.abs(actual) + np.abs(forecast)) / 2
count = np.sum(~np.isnan(actual))
return (1 / count) * np.nansum(diff / avg)
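# Illustrative example (hand-checked): only the first element differs,
# |2 - 1| / ((|1| + |2|) / 2) = 2/3, averaged over 2 points -> ~0.333.
# SMAPE(pd.Series([2.0, 4.0]), pd.Series([1.0, 4.0]))  # ~= 0.333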
def drop_columns(df: pd.DataFrame, end_with):
"""
Drop all columns that their name ends with end_with
:param df:
:param end_with:
:return:
"""
df = df[df.columns.drop(list(df.filter(regex=end_with)))]
return df
def merge_columns(df: pd.DataFrame, main: str, auxiliary: str):
"""
Merge two columns with same prefix into one column without suffix
For example: merge name_x and name_y into name
:param df:
:param main: suffix of main columns to be kept
:param auxiliary: suffix of auxiliary columns to fill na values of corresponding main columns
:return:
"""
mains = set([name.split(main)[0] for name in list(df.filter(regex=main))])
auxiliaries = set([name.split(auxiliary)[0] for name in list(df.filter(regex=auxiliary))])
shared = list(mains.intersection(auxiliaries)) # columns shared in main and auxiliary
only_aux = list(auxiliaries.difference(mains))
# Fill nan values of main columns with auxiliary columns
main_columns = [name + main for name in shared]
aux_columns = [name + auxiliary for name in shared]
df = fillna(df, target=main_columns, source=aux_columns)
# Re-suffix auxiliary columns having no duplicate in main columns
# to keep exclusively auxiliary ones in final results
df = df.rename(columns={name + auxiliary: name + main for name in only_aux})
# Drop auxiliary columns
df = drop_columns(df=df, end_with=auxiliary)
# Remove suffix from main columns
df = df.rename(columns={col: col.split(main)[0] for col in df.columns})
return df
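# Illustrative usage (column names are assumptions): merge 'temp_x'/'temp_y'
# pairs produced by a join into a single 'temp' column, using '_y' values to
# fill the NaNs of '_x'.
# merged = merge_columns(joined_df, main='_x', auxiliary='_y')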
def fillna(df: pd.DataFrame, target, source):
"""
Fill some columns with another columns in a dataframe
:param df:
:param target: array of column names
:param source: array of column names
:return: pd.DataFrame
"""
for index, target in enumerate(target):
df[target].fillna(df[source[index]], inplace=True)
return df
def write(df: pd.DataFrame, address):
"""
Write CSV data efficiently
:param df:
:param address:
:return:
"""
df.to_csv(address, sep=';', index=False, float_format='%.1f'
, date_format=const.T_FORMAT, chunksize=100000)
def shift(l: list):
l.append(l.pop(0))
def nan_gap(values: list):
"""
Average index interval created by sequential nan values
:param values:
:return:
"""
last_index = len(values) - 1
first_nan = -1
last_nan = -1
gap_sum = 0
gap_count = 0
for index, value in enumerate(values):
if pd.isnull(value):
if first_nan == -1:
first_nan = index
last_nan = index
        if first_nan != -1 and (not pd.isnull(value) or index == last_index):
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas.core.internals import ExtensionBlock
from .base import BaseExtensionTests
class BaseReshapingTests(BaseExtensionTests):
"""Tests for reshaping and concatenation."""
@pytest.mark.parametrize('in_frame', [True, False])
def test_concat(self, data, in_frame):
wrapped = pd.Series(data)
if in_frame:
wrapped = pd.DataFrame(wrapped)
result = pd.concat([wrapped, wrapped], ignore_index=True)
assert len(result) == len(data) * 2
if in_frame:
dtype = result.dtypes[0]
else:
dtype = result.dtype
assert dtype == data.dtype
assert isinstance(result._data.blocks[0], ExtensionBlock)
@pytest.mark.parametrize('in_frame', [True, False])
def test_concat_all_na_block(self, data_missing, in_frame):
valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])
na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3])
if in_frame:
valid_block = pd.DataFrame({"a": valid_block})
na_block = pd.DataFrame({"a": na_block})
result = pd.concat([valid_block, na_block])
if in_frame:
expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])})
self.assert_frame_equal(result, expected)
else:
expected = pd.Series(data_missing.take([1, 1, 0, 0]))
self.assert_series_equal(result, expected)
def test_concat_mixed_dtypes(self, data):
# https://github.com/pandas-dev/pandas/issues/20762
df1 = pd.DataFrame({'A': data[:3]})
df2 = pd.DataFrame({"A": [1, 2, 3]})
df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category')
dfs = [df1, df2, df3]
# dataframes
result = pd.concat(dfs)
expected = pd.concat([x.astype(object) for x in dfs])
self.assert_frame_equal(result, expected)
# series
result = pd.concat([x['A'] for x in dfs])
expected = pd.concat([x['A'].astype(object) for x in dfs])
self.assert_series_equal(result, expected)
# simple test for just EA and one other
result = pd.concat([df1, df2])
expected = pd.concat([df1.astype('object'), df2.astype('object')])
self.assert_frame_equal(result, expected)
result = pd.concat([df1['A'], df2['A']])
expected = pd.concat([df1['A'].astype('object'),
df2['A'].astype('object')])
self.assert_series_equal(result, expected)
def test_concat_columns(self, data, na_value):
df1 = pd.DataFrame({'A': data[:3]})
df2 = pd.DataFrame({'B': [1, 2, 3]})
expected = pd.DataFrame({'A': data[:3], 'B': [1, 2, 3]})
result = pd.concat([df1, df2], axis=1)
self.assert_frame_equal(result, expected)
result = pd.concat([df1['A'], df2['B']], axis=1)
self.assert_frame_equal(result, expected)
# non-aligned
df2 = pd.DataFrame({'B': [1, 2, 3]}, index=[1, 2, 3])
expected = pd.DataFrame({
'A': data._from_sequence(list(data[:3]) + [na_value],
dtype=data.dtype),
'B': [np.nan, 1, 2, 3]})
result = pd.concat([df1, df2], axis=1)
self.assert_frame_equal(result, expected)
result = pd.concat([df1['A'], df2['B']], axis=1)
self.assert_frame_equal(result, expected)
def test_align(self, data, na_value):
a = data[:3]
b = data[2:5]
r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3]))
# Assumes that the ctor can take a list of scalars of the type
e1 = pd.Series(data._from_sequence(list(a) + [na_value],
dtype=data.dtype))
e2 = pd.Series(data._from_sequence([na_value] + list(b),
dtype=data.dtype))
self.assert_series_equal(r1, e1)
self.assert_series_equal(r2, e2)
def test_align_frame(self, data, na_value):
a = data[:3]
b = data[2:5]
r1, r2 = pd.DataFrame({'A': a}).align(
| pd.DataFrame({'A': b}, index=[1, 2, 3]) | pandas.DataFrame |
import xml.etree.ElementTree as ET
from pathlib import Path
import pandas as pd
from .utils import remove_duplicate_indices, resample_data
NAMESPACES = {
"default": "http://www.topografix.com/GPX/1/1",
"gpxtpx": "http://www.garmin.com/xmlschemas/TrackPointExtension/v1",
"gpxx": "http://www.garmin.com/xmlschemas/GpxExtensions/v3",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
def xml_find_value_or_none(element, match, namespaces=None):
e = element.find(match, namespaces=namespaces)
if e is None:
return e
else:
return e.text
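# Usage sketch for read_gpx() defined below (illustrative; "activity.gpx" is an
# assumed file path):
#     df = read_gpx("activity.gpx", resample=True, interpolate=True)
#     df[["latitude", "longitude", "heartrate"]].head()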
def read_gpx(fpath, resample: bool = False, interpolate: bool = False) -> pd.DataFrame:
"""This method loads a GPX file into a Pandas DataFrame.
    Column names are translated to sweat terminology (e.g. "heart_rate" -> "heartrate").
Args:
fpath: str, file-like or Path object
resample: whether or not the data frame needs to be resampled to 1Hz
interpolate: whether or not missing data in the data frame needs to be interpolated
Returns:
A pandas data frame with all the data.
"""
tree = ET.parse(Path(fpath))
root = tree.getroot()
trk = root.find("default:trk", NAMESPACES)
trkseg = trk.find("default:trkseg", NAMESPACES)
records = []
for trackpoint in trkseg.findall("default:trkpt", NAMESPACES):
latitude = trackpoint.attrib.get("lat", None)
longitude = trackpoint.attrib.get("lon", None)
elevation = xml_find_value_or_none(trackpoint, "default:ele", NAMESPACES)
datetime = xml_find_value_or_none(trackpoint, "default:time", NAMESPACES)
extensions = trackpoint.find("default:extensions", NAMESPACES)
power = xml_find_value_or_none(extensions, "default:power", NAMESPACES)
trackpoint_extension = extensions.find("gpxtpx:TrackPointExtension", NAMESPACES)
temperature = xml_find_value_or_none(
trackpoint_extension, "gpxtpx:atemp", NAMESPACES
)
heartrate = xml_find_value_or_none(
trackpoint_extension, "gpxtpx:hr", NAMESPACES
)
cadence = xml_find_value_or_none(trackpoint_extension, "gpxtpx:cad", NAMESPACES)
records.append(
dict(
latitude=pd.to_numeric(latitude),
longitude=pd.to_numeric(longitude),
elevation=pd.to_numeric(elevation),
datetime=datetime,
power=pd.to_numeric(power),
temperature= | pd.to_numeric(temperature) | pandas.to_numeric |
import numpy as np
import pandas as pd
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
)
from .pandas_vb_common import tm
try:
from pandas.tseries.offsets import (
Hour,
Nano,
)
except ImportError:
# For compatibility with older versions
from pandas.core.datetools import (
Hour,
Nano,
)
class FromDicts:
def setup(self):
N, K = 5000, 50
self.index = tm.makeStringIndex(N)
self.columns = tm.makeStringIndex(K)
frame = DataFrame(np.random.randn(N, K), index=self.index, columns=self.columns)
self.data = frame.to_dict()
self.dict_list = frame.to_dict(orient="records")
self.data2 = {i: {j: float(j) for j in range(100)} for i in range(2000)}
# arrays which we won't consolidate
self.dict_of_categoricals = {i: Categorical(np.arange(N)) for i in range(K)}
def time_list_of_dict(self):
DataFrame(self.dict_list)
def time_nested_dict(self):
DataFrame(self.data)
def time_nested_dict_index(self):
DataFrame(self.data, index=self.index)
def time_nested_dict_columns(self):
DataFrame(self.data, columns=self.columns)
def time_nested_dict_index_columns(self):
DataFrame(self.data, index=self.index, columns=self.columns)
def time_nested_dict_int64(self):
# nested dict, integer indexes, regression described in #621
DataFrame(self.data2)
def time_dict_of_categoricals(self):
# dict of arrays that we won't consolidate
DataFrame(self.dict_of_categoricals)
class FromSeries:
def setup(self):
mi = MultiIndex.from_product([range(100), range(100)])
self.s = Series(np.random.randn(10000), index=mi)
def time_mi_series(self):
DataFrame(self.s)
class FromDictwithTimestamp:
params = [Nano(1), Hour(1)]
param_names = ["offset"]
def setup(self, offset):
N = 10**3
idx = date_range(Timestamp("1/1/1900"), freq=offset, periods=N)
df = DataFrame(np.random.randn(N, 10), index=idx)
self.d = df.to_dict()
def time_dict_with_timestamp_offsets(self, offset):
DataFrame(self.d)
class FromRecords:
params = [None, 1000]
param_names = ["nrows"]
# Generators get exhausted on use, so run setup before every call
number = 1
repeat = (3, 250, 10)
def setup(self, nrows):
N = 100000
self.gen = ((x, (x * 20), (x * 100)) for x in range(N))
def time_frame_from_records_generator(self, nrows):
# issue-6700
self.df = DataFrame.from_records(self.gen, nrows=nrows)
class FromNDArray:
def setup(self):
N = 100000
self.data = np.random.randn(N)
def time_frame_from_ndarray(self):
self.df = | DataFrame(self.data) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
from data import games #import games from data.py
# Select Attendance
# The games DataFrame contains the attendance for each game. An attendance row looks like this:
# type multi2 multi3 ... year
# info attendance 45342 ... 1946
# We need to select all of these rows, so below the import statements select these rows using loc[] with the conditions:
# games['type'] == 'info'
# games['multi2'] == 'attendance'
# Select only the year, and multi3 columns.
# loc[] returns the new selection as a DataFrame. Call this new DataFrame attendance.
attendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']]
# Column Labels
# The attendance DataFrame only has two columns now. Change the labels of these columns to year and attendance with the columns property.
attendance.columns = ['year', 'attendance']
# Convert to Numeric
# Select all rows and just the attendance column of the attendance DataFrame with the loc[] function.
# Hint: dataframe.loc[:, 'column']
# Assign to this selection the result of calling pd.to_numeric().
# As an argument to the pd.to_numeric() function call, pass in the same loc[] selection as above.
# Hint: selection = pd.to_numeric(selection)
attendance.loc[:, 'attendance'] = | pd.to_numeric(attendance.loc[:, 'attendance']) | pandas.to_numeric |
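# Illustrative follow-up (not from the original exercise): once 'attendance' is
# numeric, aggregations such as attendance.groupby('year')['attendance'].sum()
# work on the numeric data as expected.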
from __future__ import division, print_function
from builtins import object, zip
import pandas as pd
import xarray as xr
from dask.diagnostics import ProgressBar
from numpy import array
from past.utils import old_div
# This file is to deal with CAMx code - try to make it general for CAMx 4.7.1 --> 5.1
ProgressBar().register()
class CAMx(object):
def __init__(self):
self.objtype = 'CAMX'
self.coarse = array(
['NA', 'PSO4', 'PNO3', 'PNH4', 'PH2O', 'PCL', 'PEC', 'FPRM', 'FCRS', 'CPRM', 'CCRS', 'SOA1', 'SOA2', 'SOA3',
'SOA4'])
self.fine = array(
['NA', 'PSO4', 'PNO3', 'PNH4', 'PH2O', 'PCL', 'PEC', 'FPRM', 'FCRS', 'SOA1', 'SOA2', 'SOA3',
'SOA4'])
self.noy_gas = array(
['NO', 'NO2', 'NO3', 'N2O5', 'HONO', 'HNO3', 'PAN', 'PANX', 'PNA', 'NTR', 'CRON', 'CRN2', 'CRNO',
'CRPX', 'OPAN'])
self.poc = array(['SOA1', 'SOA2', 'SOA3', 'SOA4'])
self.dset = None
self.grid = None # gridcro2d obj
self.fname = None
self.metcrofnames = None
self.aerofnames = None
self.dates = None
self.keys = None
self.indexdates = None
self.metindex = None
self.latitude = None
self.longitude = None
self.map = None
def get_dates(self):
print('Reading CAMx dates...')
print(self.dset)
tflag1 = array(self.dset['TFLAG'][:, 0], dtype='|S7')
tflag2 = array(old_div(self.dset['TFLAG'][:, 1], 10000), dtype='|S6')
date = pd.to_datetime([i + j.zfill(2) for i, j in zip(tflag1, tflag2)], format='%Y%j%H')
indexdates = pd.Series(date).drop_duplicates(keep='last').index.values
self.dset = self.dset.isel(time=indexdates)
self.dset['time'] = date[indexdates]
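# Illustrative TFLAG example: a row of [2019123, 130000] becomes the string
# '2019123' + '13', which the '%Y%j%H' format parses as 13:00 on day-of-year 123
# of 2019.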
def open_camx(self, file):
from glob import glob
from numpy import sort
dropset = ['layer', 'longitude_bounds', 'latitude_bounds',
'x', 'y', 'level', 'lambert_conformal_conic']
nameset = {'COL': 'x', 'ROW': 'y', 'TSTEP': 'time', 'LAY': 'z'}
if type(file) == str:
fname = sort(array(glob(file)))
else:
fname = sort(array(file))
if fname.shape[0] >= 1:
if self.dset is None:
self.dset = xr.open_mfdataset(
fname.tolist(), concat_dim='TSTEP', engine='pnc').drop(dropset).rename(nameset).squeeze()
self.load_conus_basemap(res='l')
self.get_dates()
else:
dset = xr.open_mfdataset(fname.tolist(), concat_dim='TSTEP',
engine='pnc').drop(dropset).rename(nameset).squeeze()
self.dset = xr.merge([self.dset, dset])
else:
print('Files not found')
self.keys = list(self.dset.keys())
def check_z(self, varname):
if | pd.Series(self.dset[varname].dims) | pandas.Series |
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
def test_check_index_errors(sample_df):
error_message = "Specified index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_index(dataframe=sample_df, index="foo")
if isinstance(sample_df, pd.DataFrame):
# Does not check for index uniqueness with Dask
error_message = "Index column must be unique"
with pytest.raises(LookupError, match=error_message):
_check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
error_message = "logical_types must be a dictionary"
with pytest.raises(TypeError, match=error_message):
_check_logical_types(sample_df, logical_types="type")
bad_logical_types_keys = {
"full_name": None,
"age": None,
"birthday": None,
"occupation": None,
}
error_message = re.escape(
"logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
)
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_logical_types(sample_df, bad_logical_types_keys)
def test_check_time_index_errors(sample_df):
error_message = "Specified time index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
assert sample_df.ww.schema is None
sample_df.ww.init()
assert isinstance(sample_df.ww.schema, TableSchema)
def test_accessor_schema_property(sample_df):
sample_df.ww.init()
assert sample_df.ww._schema is not sample_df.ww.schema
assert sample_df.ww._schema == sample_df.ww.schema
def test_set_accessor_name(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name = "name"
df.ww.init()
assert df.ww.name is None
df.ww.name = "name"
assert df.ww.schema.name == "name"
assert df.ww.name == "name"
def test_rename_init_with_name(sample_df):
df = sample_df.copy()
df.ww.init(name="name")
assert df.ww.name == "name"
df.ww.name = "new_name"
assert df.ww.schema.name == "new_name"
assert df.ww.name == "new_name"
def test_name_error_on_init(sample_df):
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(name=123)
def test_name_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.name = 123
def test_name_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.name = "name"
assert df.ww.name == "name"
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.name == "name"
assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata = {"new": "metadata"}
df.ww.init()
assert df.ww.metadata == {}
df.ww.metadata = {"new": "metadata"}
assert df.ww.schema.metadata == {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
def test_set_metadata_after_init_with_metadata(sample_df):
df = sample_df.copy()
df.ww.init(table_metadata={"new": "metadata"})
assert df.ww.metadata == {"new": "metadata"}
df.ww.metadata = {"new": "new_metadata"}
assert df.ww.schema.metadata == {"new": "new_metadata"}
assert df.ww.metadata == {"new": "new_metadata"}
def test_metadata_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.metadata = {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.metadata == {"new": "metadata"}
assert dropped_df.ww.schema.metadata == {"new": "metadata"}
def test_metadata_error_on_init(sample_df):
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(table_metadata=123)
def test_metadata_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
sample_df.ww.init(logical_types={"age": "Categorical"})
assert isinstance(sample_df.ww.physical_types, dict)
assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
for k, v in sample_df.ww.physical_types.items():
logical_type = sample_df.ww.columns[k].logical_type
if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
assert v == logical_type.backup_dtype
else:
assert v == logical_type.primary_dtype
def test_accessor_separation_of_params(sample_df):
# mix up order of accessor and schema params
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_name",
index="id",
semantic_tags={"id": "test_tag"},
time_index="signup_date",
)
assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
assert schema_df.ww.index == "id"
assert schema_df.ww.time_index == "signup_date"
assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww._schema
head_df = schema_df.head(2)
assert head_df.ww.schema is None
head_df.ww.init_with_full_schema(schema=schema)
assert head_df.ww._schema is schema
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
iloc_df = schema_df.loc[[2, 3]]
assert iloc_df.ww.schema is None
iloc_df.ww.init_with_full_schema(schema=schema)
assert iloc_df.ww._schema is schema
assert iloc_df.ww.name == "test_schema"
assert iloc_df.ww.semantic_tags["id"] == {"index", "test_tag"}
# Extra parameters do not take effect
assert isinstance(iloc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
public_methods = [
method
for method in dir(sample_df.ww)
if is_public_method(WoodworkTableAccessor, method)
]
public_methods = [
method for method in public_methods if method not in methods_to_exclude
]
method_args_dict = {
"add_semantic_tags": [{"id": "new_tag"}],
"describe": None,
"pop": ["id"],
"describe": None,
"describe_dict": None,
"drop": ["id"],
"get_valid_mi_columns": None,
"mutual_information": None,
"mutual_information_dict": None,
"remove_semantic_tags": [{"id": "new_tag"}],
"rename": [{"id": "new_id"}],
"reset_semantic_tags": None,
"select": [["Double"]],
"set_index": ["id"],
"set_time_index": ["signup_date"],
"set_types": [{"id": "Integer"}],
"to_disk": ["dir"],
"to_dictionary": None,
"value_counts": None,
"infer_temporal_frequencies": None,
}
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for method in public_methods:
func = getattr(sample_df.ww, method)
method_args = method_args_dict[method]
with pytest.raises(WoodworkNotInitError, match=error):
if method_args:
func(*method_args)
else:
func()
def test_accessor_init_errors_properties(sample_df):
props_to_exclude = ["iloc", "loc", "schema", "_dataframe"]
props = [
prop
for prop in dir(sample_df.ww)
if is_property(WoodworkTableAccessor, prop) and prop not in props_to_exclude
]
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for prop in props:
with pytest.raises(WoodworkNotInitError, match=error):
getattr(sample_df.ww, prop)
def test_init_accessor_with_schema_errors(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
schema = schema_df.ww.schema
iloc_df = schema_df.iloc[:, :-1]
assert iloc_df.ww.schema is None
error = "Provided schema must be a Woodwork.TableSchema object."
with pytest.raises(TypeError, match=error):
iloc_df.ww.init_with_full_schema(schema=int)
error = (
"Woodwork typing information is not valid for this DataFrame: "
"The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
)
with pytest.raises(ValueError, match=error):
iloc_df.ww.init_with_full_schema(schema=schema)
def test_accessor_with_schema_parameter_warning(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww.schema
head_df = schema_df.head(2)
warning = (
"A schema was provided and the following parameters were ignored: index, "
"time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
)
with pytest.warns(ParametersIgnoredWarning, match=warning):
head_df.ww.init_with_full_schema(
index="ignored_id",
time_index="ignored_time_index",
logical_types={"ignored": "ltypes"},
already_sorted=True,
semantic_tags={"ignored_id": "ignored_test_tag"},
use_standard_tags={"id": True, "age": False},
schema=schema,
)
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
schema_df = sample_df.copy()
# We can access attributes on the Accessor class before the schema is initialized
assert schema_df.ww.schema is None
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
schema_df.ww.index
schema_df.ww.init()
assert schema_df.ww.name is None
assert schema_df.ww.index is None
assert schema_df.ww.time_index is None
assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)
error = re.escape("Woodwork has no attribute 'not_present'")
with pytest.raises(AttributeError, match=error):
sample_df.ww.init()
sample_df.ww.not_present
def test_getitem(sample_df):
df = sample_df
df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={"age": "Double"},
semantic_tags={"age": {"custom_tag"}},
)
assert list(df.columns) == list(df.ww.schema.columns)
subset = ["id", "signup_date"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index == "id"
assert df_subset.ww.time_index == "signup_date"
subset = ["age", "email"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index is None
assert df_subset.ww.time_index is None
assert isinstance(df_subset.ww.logical_types["age"], Double)
assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}
subset = df.ww[[]]
assert len(subset.ww.columns) == 0
assert subset.ww.index is None
assert subset.ww.time_index is None
series = df.ww["age"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
assert isinstance(series.ww.logical_type, Double)
assert series.ww.semantic_tags == {"custom_tag", "numeric"}
series = df.ww["id"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
assert isinstance(series.ww.logical_type, Integer)
assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
sample_df.ww["age"]
def test_getitem_invalid_input(sample_df):
df = sample_df
df.ww.init()
error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww[["email", 2, 1]]
error_msg = "Column with name 'invalid_column' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww["invalid_column"]
def test_accessor_equality(sample_df):
# Confirm equality with same schema and same data
schema_df = sample_df.copy()
schema_df.ww.init()
copy_df = schema_df.ww.copy()
assert schema_df.ww == copy_df.ww
# Confirm not equal with different schema but same data
copy_df.ww.set_time_index("signup_date")
assert schema_df.ww != copy_df.ww
# Confirm not equal with same schema but different data - only pandas
loc_df = schema_df.ww.loc[:2, :]
if isinstance(sample_df, pd.DataFrame):
assert schema_df.ww != loc_df
else:
assert schema_df.ww == loc_df
def test_accessor_shallow_equality(sample_df):
metadata_table = sample_df.copy()
metadata_table.ww.init(table_metadata={"user": "user0"})
diff_metadata_table = sample_df.copy()
diff_metadata_table.ww.init(table_metadata={"user": "user2"})
assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)
schema = metadata_table.ww.schema
diff_data_table = metadata_table.ww.loc[:2, :]
same_data_table = metadata_table.ww.copy()
assert diff_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)
assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
if isinstance(sample_df, pd.DataFrame):
assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
time_index_df.ww.init(name="schema", index="id", time_index="times")
assert time_index_df.ww.name == "schema"
assert time_index_df.ww.index == "id"
assert time_index_df.ww.time_index == "times"
assert isinstance(
time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
)
def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(
name="schema", time_index="strs", logical_types={"strs": Datetime}
)
assert schema_df.ww.time_index == "ints"
assert schema_df["ints"].dtype == "datetime64[ns]"
def test_accessor_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Integer)
assert date_col.semantic_tags == {"time_index", "numeric"}
# Specify logical type for time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
date_col = schema_df.ww.columns["strs"]
assert schema_df.ww.time_index == "strs"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})
# Set numeric time index after init
schema_df = time_index_df.copy()
schema_df.ww.init(logical_types={"ints": "Double"})
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"numeric", "time_index"}
def test_numeric_time_index_dtypes(numeric_time_index_df):
numeric_time_index_df.ww.init(time_index="ints")
assert numeric_time_index_df.ww.time_index == "ints"
assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("floats")
assert numeric_time_index_df.ww.time_index == "floats"
assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("with_null")
assert numeric_time_index_df.ww.time_index == "with_null"
assert isinstance(
numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
)
assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
"time_index",
"numeric",
}
def test_accessor_init_with_invalid_string_time_index(sample_df):
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
logical_types = {"full_name": "natural_language", "age": "Double"}
schema_df = sample_df.copy()
schema_df.ww.init(name="schema", logical_types=logical_types)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, Double)
logical_types = {
"full_name": "NaturalLanguage",
"age": "IntegerNullable",
"signup_date": "Datetime",
}
schema_df = sample_df.copy()
schema_df.ww.init(
name="schema", logical_types=logical_types, time_index="signup_date"
)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
assert schema_df.ww.time_index == "signup_date"
def test_int_dtype_inference_on_init():
df = pd.DataFrame(
{
"ints_no_nans": pd.Series([1, 2]),
"ints_nan": pd.Series([1, np.nan]),
"ints_NA": pd.Series([1, pd.NA]),
"ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["ints_no_nans"].dtype == "int64"
assert df["ints_nan"].dtype == "float64"
assert df["ints_NA"].dtype == "category"
assert df["ints_NA_specified"].dtype == "Int64"
def test_bool_dtype_inference_on_init():
df = pd.DataFrame(
{
"bools_no_nans": pd.Series([True, False]),
"bool_nan": pd.Series([True, np.nan]),
"bool_NA": pd.Series([True, pd.NA]),
"bool_NA_specified": | pd.Series([True, pd.NA], dtype="boolean") | pandas.Series |
import math
import warnings
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import kurtosis, skew
from sklearn.cluster import KMeans
pi = math.pi
pd.options.display.max_columns = 500
warnings.filterwarnings("ignore")
def range_func(x: List[Union[int, float]]) -> float:
max_val = np.max(x)
min_val = np.min(x)
range_val = max_val - min_val
return range_val
def iqr_func2(x: List[Union[int, float]]) -> float:
q3, q1 = np.percentile(x, [80, 20])
iqr = q3 - q1
return iqr
def iqr_func3(x: List[Union[int, float]]) -> float:
q3, q1 = np.percentile(x, [60, 40])
iqr = q3 - q1
return iqr
def iqr_func4(x: List[Union[int, float]]) -> float:
q3, q1 = np.percentile(x, [95, 15])
iqr = q3 - q1
return iqr
def premad(x: List[Union[int, float]]) -> float:
return np.median(np.absolute(x - np.median(x, axis=0)), axis=0)
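# Toy check (illustrative): premad([1, 2, 3, 100]) -> median(|x - 2.5|) = 1.0.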
def preskew(x: List[Union[int, float]]) -> float:
return skew(x)
def prekurt(x: List[Union[int, float]]) -> float:
return kurtosis(x, fisher=True)
def load_dataset(path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
path = "../input/ai-hackaton/"
train = pd.read_csv(path + "train_features.csv")
test = | pd.read_csv(path + "test_features.csv") | pandas.read_csv |
"""Provides utilities to import the account *.csv files in the folder `csv`.
The csv files have to match a certain naming pattern in order to map them to
different importers. See `_read_account_csvs()`."""
import pathlib
import re
from typing import Iterable
import numpy as np
import pandas as pd
import wealth.config
import wealth.importers.dkb_giro
import wealth.importers.dkb_visa
import wealth.importers.n26_mastercard
import wealth.importers.sparkasse_giro
from wealth.importers.common import transfer_columns
from wealth.util.transaction_type import TransactionType
def _create_transaction_type(row: pd.Series) -> TransactionType:
"""Create a TransactionType object from a dataframe row."""
if row["transaction_type"]:
return row["transaction_type"]
accounts = wealth.config.get("accounts", {})
ibans = [accounts[acc].get("iban", 0) for acc in accounts.keys()]
if row["iban"] in ibans:
return TransactionType.from_amount(row["amount"], is_internal=True)
return TransactionType.from_amount(row["amount"], is_internal=False)
def _add_transaction_type_column(df: pd.DataFrame) -> pd.DataFrame:
"""Populate a column named transaction_type with values of type
TransactionType to the given data frame and return the same DataFrame."""
df["transaction_type"] = df.apply(_create_transaction_type, axis=1)
return df
def _yield_files_with_suffix(
directory: pathlib.Path, suffix: str
) -> Iterable[pathlib.Path]:
"""Yield all files with the given suffix in the given folder."""
suffix_lower = suffix.lower()
for file in directory.iterdir():
if file.suffix.lower() == suffix_lower and file.is_file():
yield file
def _create_offset_df(account_name: str, df: pd.DataFrame) -> pd.DataFrame:
"""Create a dataframe holding the account's configured initial offset."""
accounts = wealth.config.get("accounts", {})
return pd.DataFrame(
{
"account": account_name,
"account_type": df["account_type"].iloc[0],
"amount": accounts.get(account_name, {}).get("offset", 0),
"date": df["date"].min(),
"description": "<initial offset>",
},
index=[0],
)
def _delay_incomes(df: pd.DataFrame) -> pd.DataFrame:
"""Guarantee that incomese virtually happen after all expenses at any given
day."""
df.loc[df["amount"] > 0, "date"] = df["date"] + | pd.DateOffset(hours=1) | pandas.DateOffset |
from typing import Optional
from dataclasses import dataclass
import pandas as pd
from poker.base import unique_values, native_mean, running_mean, running_std, running_median, running_percentile
from poker.document_filter_class import DocumentFilter
pd.set_option('use_inf_as_na', True)
def _ts_concat(dic: dict, index_lst: list) -> pd.DataFrame:
"""Concat a dict of dicts or pd.DataFrames"""
lst_df = []
for key, val in dic.items():
if type(val) != pd.DataFrame:
val = pd.DataFrame(val, index=index_lst)
val.columns = [key + ' ' + col if col != '' else key for col in val.columns]
else:
val.columns = [key]
lst_df.append(val)
final_df = pd.concat(lst_df, axis=1).reset_index()
return final_df
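# Illustrative (toy values): _ts_concat({'Position Bet': {'Pre Flop': [1, 2]},
# 'Game Id': {'': ['g1', 'g2']}}, ['t0', 't1']) yields the columns
# ['index', 'Position Bet Pre Flop', 'Game Id'] after the reset_index().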
def _ts_hand(data: pd.DataFrame) -> pd.DataFrame:
"""Build Hand related data"""
pos_dic = {'Pre Flop': 0.25, 'Post Flop': 0.50, 'Post Turn': 0.75, 'Post River': 1.0}
# Game Id
g_i_df = pd.DataFrame(data.groupby('Start Time')['Game Id'].last())
g_i_df.columns = ['']
# Time in Hand
t_h_df = pd.DataFrame(data.groupby('Start Time')['Seconds into Hand'].last())
t_h_df.columns = ['']
# Last Position
last_position = data.groupby('Start Time')['Position'].last().tolist()
l_p_df = pd.DataFrame([pos_dic[item] for item in last_position], index=t_h_df.index, columns=[''])
# Win
r_w_p = data.groupby('Start Time')['Win'].last().tolist()
r_w_p = [1 if item is True else 0 for item in r_w_p]
r_w_p_df = pd.DataFrame(running_mean(data=r_w_p, num=5), index=t_h_df.index, columns=[''])
ind_lst = data.groupby('Start Time').last().index.tolist()
lst_dic = {'Seconds per Hand': t_h_df, 'Last Position in Hand': l_p_df, 'Rolling Win Percent': r_w_p_df,
'Game Id': g_i_df}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_position(data: pd.DataFrame) -> pd.DataFrame:
"""Build position related data"""
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
p_bet = {'Pre Flop': [], 'Post Flop': [], 'Post Turn': [], 'Post River': []}
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
t_p_bet[row['Position']] += row['Bet Amount']
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Position Bet': p_bet, 'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_class_counts_seconds(data: pd.DataFrame) -> pd.DataFrame:
"""Build class, counts, and seconds data"""
# Bet, Count, and Time Per Position
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
pos_lst = ['Pre Flop', 'Post Flop', 'Post Turn', 'Post River']
class_lst, short_class_lst = ['Checks', 'Calls', 'Raises'], ['Calls', 'Raises']
c_count = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_seconds = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_bet = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_pot = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_chips = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_pos, t_bet, t_class, t_second = row['Position'], row['Bet Amount'], row['Class'], row['Seconds']
t_key = t_pos + ' ' + t_class
t_c_count[t_key] += 1
if t_c_seconds[t_key] is not None:
t_c_seconds[t_key] = native_mean(data=[t_c_seconds[t_key]] + [t_second])
else:
t_c_seconds[t_key] = t_second
if t_class != 'Checks':
if t_c_bet[t_key] is not None:
t_c_bet[t_key] = native_mean(data=[t_c_bet[t_key]] + [t_bet])
else:
t_c_bet[t_key] = t_bet
bet_pot_per = t_bet / (row['Pot Size'] - t_bet)
if t_c_bet_per_pot[t_key] is not None:
t_c_bet_per_pot[t_key] = native_mean(data=[t_c_bet_per_pot[t_key]] + [bet_pot_per])
else:
t_c_bet_per_pot[t_key] = bet_pot_per
bet_chip_per = t_bet / (row['Player Current Chips'] + t_bet)
if t_c_bet_per_chips[t_key] is not None:
t_c_bet_per_chips[t_key] = native_mean(data=[t_c_bet_per_chips[t_key]] + [bet_chip_per])
else:
t_c_bet_per_chips[t_key] = bet_chip_per
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Class Count': c_count, 'Class Seconds': c_seconds, 'Class Bet': c_bet,
'Class Bet Percent of Pot': c_bet_per_pot, 'Class Bet Percent of Chips': c_bet_per_chips,
'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
@dataclass
class TSanalysis:
"""
Calculate Time Series stats for a player.
:param data: Input DocumentFilter.
:type data: DocumentFilter
:param upper_q: Upper Quantile percent, default is 0.841. *Optional*
:type upper_q: float
:param lower_q: Lower Quantile percent, default is 0.159. *Optional*
:type lower_q: float
:param window: Rolling window, default is 5. *Optional*
:type window: int
:example:
>>> from poker.time_series_class import TSanalysis
>>> docu_filter = DocumentFilter(data=poker, player_index_lst=['DZy-22KNBS'])
>>> TSanalysis(data=docu_filter)
:note: This class expects a DocumentFilter with only one player_index used.
"""
def __init__(self, data: DocumentFilter, upper_q: Optional[float] = 0.841, lower_q: Optional[float] = 0.159,
window: Optional[int] = 5):
self._docu_filter = data
self._window = window
self._upper_q = upper_q
self._lower_q = lower_q
self._df = data.df
hand_df = _ts_hand(data=self._df)
self._hand = hand_df.copy()
position_df = _ts_position(data=self._df)
self._position = position_df.copy()
class_df = _ts_class_counts_seconds(data=self._df)
self._class = class_df.copy()
hand_cols, hand_ind = hand_df.columns, hand_df.index
self._hand_mean = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_std = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_median = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_upper_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_lower_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
for col in hand_cols:
if col not in ['Game Id', 'index', 'Start Time']:
self._hand_mean[col] = running_mean(data=hand_df[col], num=self._window)
self._hand_std[col] = running_std(data=hand_df[col], num=self._window)
self._hand_median[col] = running_median(data=hand_df[col], num=self._window)
self._hand_upper_q[col] = running_percentile(data=hand_df[col], num=self._window, q=upper_q)
self._hand_lower_q[col] = running_percentile(data=hand_df[col], num=self._window, q=lower_q)
pos_cols, pos_ind = position_df.columns, position_df.index
self._position_mean = pd.DataFrame(columns=pos_cols, index=pos_ind)
self._position_std = pd.DataFrame(columns=pos_cols, index=pos_ind)
self._position_median = pd.DataFrame(columns=pos_cols, index=pos_ind)
self._position_upper_q = pd.DataFrame(columns=pos_cols, index=pos_ind)
self._position_lower_q = pd.DataFrame(columns=pos_cols, index=pos_ind)
for col in pos_cols:
if col not in ['Game Id', 'index', 'Start Time']:
self._position_mean[col] = running_mean(data=position_df[col], num=self._window)
self._position_std[col] = running_std(data=position_df[col], num=self._window)
self._position_median[col] = running_median(data=position_df[col], num=self._window)
self._position_upper_q[col] = running_percentile(data=position_df[col], num=self._window, q=upper_q)
self._position_lower_q[col] = running_percentile(data=position_df[col], num=self._window, q=lower_q)
class_cols, class_ind = class_df.columns, class_df.index
self._class_mean = pd.DataFrame(columns=class_cols, index=class_ind)
self._class_std = pd.DataFrame(columns=class_cols, index=class_ind)
self._class_median = pd.DataFrame(columns=class_cols, index=class_ind)
self._class_upper_q = | pd.DataFrame(columns=class_cols, index=class_ind) | pandas.DataFrame |
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
pd.Timestamp('2019-08-06 00:00:00'): 24744027.428384878,
pd.Timestamp('2019-08-07 00:00:00'): 21641181.771564845,
pd.Timestamp('2019-08-08 00:00:00'): 27012160.85245146,
pd.Timestamp('2019-08-09 00:00:00'): 13806814.237002019,
pd.Timestamp('2019-08-10 00:00:00'): 9722459.599448118,
pd.Timestamp('2019-08-11 00:00:00'): 20450260.26194652,
pd.Timestamp('2019-08-12 00:00:00'): 22125711.151501,
pd.Timestamp('2019-08-13 00:00:00'): 11444206.200090334,
pd.Timestamp('2019-08-14 00:00:00'): 17677326.65707852,
pd.Timestamp('2019-08-15 00:00:00'): 26968819.12338184,
pd.Timestamp('2019-08-16 00:00:00'): 22592246.991756547,
pd.Timestamp('2019-08-17 00:00:00'): 15997597.519811645,
pd.Timestamp('2019-08-18 00:00:00'): 17731498.506244037,
pd.Timestamp('2019-08-19 00:00:00'): 22127822.876592986,
pd.Timestamp('2019-08-20 00:00:00'): 5550506.789972418},
'items': {pd.Timestamp('2019-08-01 00:00:00'): 2895,
pd.Timestamp('2019-08-02 00:00:00'): 3082,
pd.Timestamp('2019-08-03 00:00:00'): 3559,
pd.Timestamp('2019-08-04 00:00:00'): 3582,
pd.Timestamp('2019-08-05 00:00:00'): 2768,
pd.Timestamp('2019-08-06 00:00:00'): 3431,
pd.Timestamp('2019-08-07 00:00:00'): 2767,
pd.Timestamp('2019-08-08 00:00:00'): 2643,
pd.Timestamp('2019-08-09 00:00:00'): 1506,
pd.Timestamp('2019-08-10 00:00:00'): 1443,
pd.Timestamp('2019-08-11 00:00:00'): 2466,
pd.Timestamp('2019-08-12 00:00:00'): 3482,
pd.Timestamp('2019-08-13 00:00:00'): 1940,
pd.Timestamp('2019-08-14 00:00:00'): 1921,
pd.Timestamp('2019-08-15 00:00:00'): 3479,
pd.Timestamp('2019-08-16 00:00:00'): 3053,
pd.Timestamp('2019-08-17 00:00:00'): 2519,
pd.Timestamp('2019-08-18 00:00:00'): 2865,
pd.Timestamp('2019-08-19 00:00:00'): 1735,
pd.Timestamp('2019-08-20 00:00:00'): 1250},
'order_total_avg': {pd.Timestamp('2019-08-01 00:00:00'): 1182286.0960463749,
pd.Timestamp('2019-08-02 00:00:00'): 1341449.559055637,
pd.Timestamp('2019-08-03 00:00:00'): 1270616.0372525519,
pd.Timestamp('2019-08-04 00:00:00'): 1069011.1516039693,
pd.Timestamp('2019-08-05 00:00:00'): 1355304.7342628485,
pd.Timestamp('2019-08-06 00:00:00'): 1283968.435650978,
pd.Timestamp('2019-08-07 00:00:00'): 1319110.4787216866,
pd.Timestamp('2019-08-08 00:00:00'): 1027231.5196824896,
pd.Timestamp('2019-08-09 00:00:00'): 1201471.0717715647,
pd.Timestamp('2019-08-10 00:00:00'): 1314611.2300065856,
pd.Timestamp('2019-08-11 00:00:00'): 1186152.4565363638,
pd.Timestamp('2019-08-12 00:00:00'): 1155226.4552911327,
pd.Timestamp('2019-08-13 00:00:00'): 1346981.8930212667,
pd.Timestamp('2019-08-14 00:00:00'): 1019646.0386455443,
| pd.Timestamp('2019-08-15 00:00:00') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""main.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_KzpxPsl8B2T4hE_Z2liSu1xzHxbA5KE
"""
import pandas as pd
import numpy as np
import seaborn as sns
from datetime import datetime
import matplotlib.pyplot as plt
# =============================================================================
# from datetime import datetime
# import folium
# from folium import plugins
# from folium.plugins import MarkerCluster
# =============================================================================
sns.set_style('darkgrid')
df = pd.read_csv('US_WeatherEvents_2016-2019.csv')
df.head()
df.describe()
df.info()
df.isnull().sum()
print(df.Type.unique())
print(df.Severity.unique())
print(len(df.AirportCode.unique()))
print(df.TimeZone.unique())
print(len(df.County.unique()))
print(len(df.State.unique()))
# print(df.EventId.unique())
## Plot these for better visualization
weather_type_df = df['Type'].value_counts(ascending=True)
## Some formatting to make it look nicer
fig=plt.figure(figsize=(18, 16))
plt.title("Frequency of Weathers")
plt.xlabel("Frequency of Weather")
plt.ylabel("Type of Weather")
ax = weather_type_df.plot(kind='barh')
ax.get_xaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
df["StartTime(UTC)"] = | pd.to_datetime(df["StartTime(UTC)"], format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |