| prompt | completion | api |
|---|---|---|
| stringlengths 19 – 1.03M | stringlengths 4 – 2.12k | stringlengths 8 – 90 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 23:11:09 2020
@author: esteban
"""
import os
print(os.getcwd())
import pandas as pd
import glob
from variables import pathInformesComunas,\
pathExport,\
nombreInformeConsolidadoComunas,\
nombreInformesComunas,pathReportesCOVID,\
pathCasosActivos
print("The comuna consolidation is built from ivan's submodule")
print("Together with the MinCiencias submodule for active cases")
pathImport=pathInformesComunas
allfiles = [i for i in glob.glob((pathImport+'*.{}').format('csv'))]
auxBool=True
primero=True
df=pd.read_csv(pathReportesCOVID)
'''
df.columns
Out[27]: Index(['Fecha',
'CUT', 'Region', 'Comuna', 'Casos Confirmados'],
dtype='object')
'''
# first, convert to the column format we were already using, so the existing scripts keep working
df=df.rename(columns={"Fecha": "fecha",
"CUT": "id_comuna",
"Region": "nombre_region",
"Comuna":"nombre_comuna",
"Casos Confirmados": "casos_totales"})
df.fecha = pd.to_datetime(df["fecha"], format="%d-%m-%Y")
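# --- Hedged continuation sketch (not in the original excerpt): the glob results in
# `allfiles` are collected above but never consumed here; a plausible next step is
# concatenating the per-comuna CSVs into one consolidated export. The export filename
# built from the imported variables is an assumption.
consolidado = pd.concat((pd.read_csv(f) for f in allfiles), ignore_index=True)
consolidado.to_csv(pathExport + nombreInformeConsolidadoComunas + '.csv', index=False)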
import pandas as pd
import json
benchmarks = json.load(open("bench_out.json"))["benchmarks"]
classes = set()
datasets = set()
for benchmark in benchmarks:
name = benchmark["name"]
classes.add(name.split("_")[0])
datasets.add(name.split("<")[1].split(">")[0])
df = pd.DataFrame(benchmarks)
for class_name in classes:
d = pd.DataFrame()
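    # --- Hedged continuation sketch (the excerpt truncates here). Google Benchmark
    # JSON rows expose "name" and "real_time"; the loop presumably gathers one timing
    # column per dataset for every benchmark class. Field names are assumptions.
    for dataset in datasets:
        rows = df[df["name"].str.startswith(class_name) & df["name"].str.contains("<" + dataset + ">", regex=False)]
        d[dataset] = rows["real_time"].reset_index(drop=True)
    print(class_name)
    print(d.describe())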
import random
from collections import defaultdict
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
from typing import Dict
from warnings import warn
from pandas import concat, DataFrame, Categorical
from tqdm import tqdm
from data_frames import to_nested_dicts
from data_sources.drug_connectivity_map import AggregatedScores
from ..models.with_controls import TCGAExpressionWithControls
from .permutations import compare_against_permutations_group, compare_observations_with_permutations
from .reevaluation import extract_scores_from_result, extract_single_score, reevaluate
from .display import maximized_metrics, minimized_metrics, choose_columns
from .scores_models import Group
def subtypes_benchmark(
expression, samples_by_subtype, benchmark_function, funcs, *args,
samples_mapping=lambda x: x, use_all_controls=True,
single_sample=True, multi_sample=True, **kwargs
):
subtypes_results = {}
if use_all_controls:
all_controls = expression[expression.columns[expression.classes == 'normal']]
additional_controls = {'additional_controls': all_controls}
else:
additional_controls = {}
print(f'Using all disease controls: {use_all_controls}')
for subtype, samples in samples_by_subtype.items():
type_subset = expression[[samples_mapping(sample) for sample in samples]]
queries = {'query_signature': None}
if single_sample:
print(f'Using subset: {subtype} with {len(type_subset.columns)} samples')
if 'normal' not in type_subset.classes and not use_all_controls:
print(
'No normal subtype-specific samples to create differential expression, '
'set use_all_controls=True to include control samples from all subtypes.'
)
continue
differential_subset = type_subset.differential(
'tumor', 'normal',
only_paired=False,
**additional_controls
)
if differential_subset is None:
print(f'Skipping subtype {subtype}')
continue
queries['query_signature'] = differential_subset
if multi_sample:
if use_all_controls:
absent_controls = all_controls.columns.difference(type_subset.columns)
type_subset = concat([type_subset, all_controls[absent_controls]], axis=1)
subset_with_controls = TCGAExpressionWithControls(type_subset)
queries['query_expression'] = subset_with_controls
subtypes_results[subtype] = benchmark_function(
funcs,
*args, **{**queries, **kwargs}
)
return subtypes_results
def random_subtypes_benchmark(i, expression, *args, **kwargs):
samples = list(expression.columns)
random_mapping = dict(zip(samples, random.sample(samples, len(samples))))
f = StringIO()
with redirect_stdout(f), redirect_stderr(f):
result = subtypes_benchmark(expression, *args, samples_mapping=random_mapping.get, **kwargs)
return result
def group_permutations_by_subtype(permutations) -> Dict[str, DataFrame]:
grouped_by_corresponding_cluster = defaultdict(list)
for permutation in permutations:
for cluster_name, result in permutation.items():
grouped_by_corresponding_cluster[cluster_name].append(result)
grouped_by_corresponding_cluster = {
cluster_name: concat(results)
for cluster_name, results in grouped_by_corresponding_cluster.items()
}
return grouped_by_corresponding_cluster
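# Example (hypothetical data, not from the original): two permutations, each a dict
# mapping cluster name -> one-row DataFrame, are stacked into one DataFrame per cluster:
#   perm_a = {'basal': DataFrame({'auc': [0.51]}), 'luminal': DataFrame({'auc': [0.48]})}
#   perm_b = {'basal': DataFrame({'auc': [0.53]}), 'luminal': DataFrame({'auc': [0.50]})}
#   group_permutations_by_subtype([perm_a, perm_b])['basal']  # -> two rows of 'auc'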
def test_permutations_number_in_subtype(
result_subtype_subset: DataFrame, subtype_subset: DataFrame, subtype: str,
ranked_categories={'indications', 'contraindications', 'controls'}
):
data = []
maximized_columns = choose_columns(result_subtype_subset, maximized_metrics, ranked_categories)
minimized_columns = choose_columns(result_subtype_subset, minimized_metrics, ranked_categories)
if set(subtype_subset.index.unique()) != set(result_subtype_subset.index.unique()):
warn('Different sets of functions in result and permutations')
for scoring_function in subtype_subset.index.unique():
function_subset = subtype_subset.loc[scoring_function]
result_function_subset = result_subtype_subset.loc[scoring_function]
count = len(function_subset)
for first_n_permutations in range(2, count + 1):
considered_permutations = function_subset.head(first_n_permutations)
measurements = DataFrame(compare_against_permutations_group(
result_function_subset, considered_permutations,
minimized_columns, maximized_columns
))
measurements['n_permutations'] = first_n_permutations
measurements['scoring_function'] = scoring_function
data.append(measurements)
joined = concat(data)
joined['subtype'] = subtype
return joined
def compare_observations_with_subtype_permutations(
subtypes_results: Dict[Group, DataFrame],
permutations: DataFrame,
ranked_categories={'indications', 'contraindications', 'controls'},
check_functions=True
):
permutations_grouped_by_corresponding_cluster = permutations.groupby('subtype')
data = []
for subtype, permutations in tqdm(permutations_grouped_by_corresponding_cluster):
result = subtypes_results[subtype]
function_results = compare_observations_with_permutations(
result, permutations,
ranked_categories, check_functions
)
for function_result in function_results:
function_result['subtype'] = subtype
data.append(function_result)
df = concat(data)
import os
import logging
import inspect
import time
import pandas as pd
import numpy as np
from talpa.visualization import *
from sklearn.model_selection import StratifiedShuffleSplit
from talpa.classifiers import *
from talpa.metrics import *
from sklearn.preprocessing import StandardScaler
from talpa.core.data_checks import check_data, is_numeric
logfile_dir =os.path.join(os.getcwd(), "logs")
logfile = os.path.join(logfile_dir, 'Logs.log')
logging.basicConfig(filename=logfile ,level=logging.INFO, format='%(asctime)s %(name)s %(levelname)-8s %(message)s',datefmt='%Y-%m-%d %H:%M:%S', filemode='w')
class DatasetReader():
def __init__(self, dataset_folder="",filename=" ", **kwargs):
"""
The generic dataset parser for parsing datasets for solving different learning problems.
Parameters
----------
dataset_folder: string
Name of the folder containing the datasets
kwargs:
Keyword arguments for the dataset parser
"""
#self.dr_logger =logging.basicConfig(level=logging.INFO)
self.dr_logger = logging.getLogger(DatasetReader.__name__)
self.accuracy = []
self.f1scr =[]
if dataset_folder is not None:
self.dirname = os.path.join(os.getcwd(), dataset_folder)
self.dr_logger.info("Dataset Folder path {}".format(self.dirname))
if not os.path.exists(self.dirname):
self.dr_logger.info("Path given for dataset does not exist {}".format(self.dirname))
self.dirname = None
else:
self.filename = os.path.join(self.dirname, filename)
self.dr_logger.info("Dataset Filepath {}".format(self.filename))
def fit_predict(self, model, X_train, y_train, X_test, y_test, classifier_name):
'''
:param model: Classifier to be fitted
:param X_train: Dataframe of shape (n_samples, n_features)
:param y_train: Dataframe of shape (n_samples, 1)
:param X_test: Dataframe of shape (n_samples, n_features)
:param y_test: Dataframe of shape (n_samples, 1)
:param classifier_name: Name of the classifier model
:return: Numpy array of accuracy and f1score
'''
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
acc = accuracy_score(y_test, y_pred)
f1score = f1_measure(y_test, y_pred)
self.dr_logger.info("Accuracy and F1score in each split for {} {}, {}:".format(classifier_name, acc, f1score))
return acc, f1score
def execute_model(self, modelname, df):
'''
This function will execute the model based on the model name provided and the dataframe of the datasets
:param modelname: Classifier name that needs to be run
:param df: Dataframe of the datasets
:return:
'''
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
if modelname == "LogisticRegression" or modelname == "KNN":
self.dr_logger.info("Performing feature scaling before executing {}".format (modelname))
scaler = StandardScaler()
scaler.fit(df.drop('activity', axis=1))
scaled_features = scaler.transform(df.drop('activity', axis=1))
df_feat = pd.DataFrame(scaled_features, columns=df.columns[:-1], index=df.index)
X = df_feat
y = df.iloc[:, -1]
sss = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
for train_index, test_index in sss.split(X, y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
if modelname == 'GradientBoost':
clf = GradientBoostDetector(max_depth=5, n_estimators=100, random_state=0)
acc, f1 = self.fit_predict(clf, X_train, y_train, X_test, y_test, modelname)
self.accuracy.append(acc)
self.f1scr.append(f1)
elif modelname == 'RandomForest':
rf = RandomForestDetector(max_depth=7, n_estimators=50, random_state=0)
acc, f1 =self.fit_predict(rf, X_train, y_train, X_test, y_test, modelname)
self.accuracy.append(acc)
self.f1scr.append(f1)
elif modelname == 'LogisticRegression':
logreg = LogisticRegressionDetector(random_state=0)
acc, f1 = self.fit_predict(logreg, X_train, y_train, X_test, y_test, modelname)
self.accuracy.append(acc)
self.f1scr.append(f1)
elif modelname == 'KNN':
knn = KNeighborsDetector(n_neighbors=3)
acc, f1 =self.fit_predict(knn, X_train, y_train, X_test, y_test, modelname)
self.accuracy.append(acc)
self.f1scr.append(f1)
elif modelname == 'XGBoost':
# conda install -c anaconda py-xgboost
xgb = XGBClassification()
acc, f1 =self.fit_predict(xgb, X_train, y_train, X_test, y_test, modelname)
self.accuracy.append(acc)
self.f1scr.append(f1)
acc_mean = np.array(self.accuracy).mean()
f1_mean = np.array(self.f1scr).mean()
df_metrics = pd.DataFrame({'Accuracy': [acc_mean], 'F1score': [f1_mean]})
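# --- Hedged usage sketch (not in the original; the folder, file name and target
# column layout are assumptions) ---
#   reader = DatasetReader(dataset_folder="data", filename="sensor_activity.csv")
#   df = pd.read_csv(reader.filename)
#   reader.execute_model("RandomForest", df)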
#!/usr/bin/python3
import pandas as pd
import numpy as np
import mhyp_enrich as mh
import pdb
import time
import math
import statsmodels.stats.multitest as mt
import random
from scipy import stats as st
from scipy.stats import beta
def main():
num_MC_samp = 1000000 # Number of Monte-Carlo samples to use
alt = 'two-sided'
random.seed(525601)
if 1:
# Create Pickle for fast loading of the data
tfoe_FC_df = pd.read_excel('Downloads/tfoe.searchable_130115.xlsx',sheetname='TFOE.data', header=9, skip_footer = 3, index_col = 0, parse_cols = list(range(0,210)))
tfoe_FC_df.to_pickle('Analysis_Output/tfoe_FC.pkl')
tfoe_pval_df = pd.read_excel('Downloads/tfoe.searchable_130115.xlsx',sheetname='TFOE.data', header=9, skip_footer = 3, index_col = 0, parse_cols = list(range(210,420)))
tfoe_pval_df.to_pickle('Analysis_Output/tfoe_pval.pkl')
else:
# Load Pickles (much faster than reading excel files)
tfoe_FC_df = pd.read_pickle('Analysis_Output/tfoe_FC.pkl')
tfoe_pval_df = pd.read_pickle('Analysis_Output/tfoe_pval.pkl')
# Remove TFs (from both dfs) with less than 0.5 l2FC up.
to_keep = [tfoe_FC_df.loc[name,name] > 0.5 for name in list(tfoe_FC_df.columns.values)]
tfoe_FC_df = tfoe_FC_df.loc[:, to_keep]
tfoe_pval_df = tfoe_pval_df.loc[:, to_keep]
# Create new df with 1 = UP, -1 = DOWN, 0 = NOCALL for each TF
col_up_down_ls = list()
for i,c in enumerate(tfoe_FC_df.columns.values):
new_col = pd.DataFrame({'Rv': tfoe_FC_df.index, c: 0}).set_index('Rv')
new_col[((tfoe_pval_df[c] < .01) & (tfoe_FC_df[c] > 1.0))] = 1 #called upregulated
new_col[((tfoe_pval_df[c] < .01) & (tfoe_FC_df[c] < -1.0))] = -1 #called downregulated
col_up_down_ls.append(new_col)
tfoe_call_df = pd.concat(col_up_down_ls,axis=1)
# Read in RNA-seq data to get NCBI Descriptions
hyp_rnaseq = pd.read_csv("Analysis_Output/7H9vshyp_low-read-rm.csv").rename(columns={"Rv.Homologs..NCBI.":"Rv#","Annotations..NCBI.":"Description"})
ncbi_desc = hyp_rnaseq[["Rv#","Description"]]
# Read in and format Voskuil Hypoxia data
hyp_rna_arr = pd.read_excel('Downloads/1-s2.0-S147297920400023X-mmc1.xls',sheetname='Sheet1', header=3, skip_footer = 0, parse_cols = [0,63])
hyp_rna_arr['Ave.'] = pd.to_numeric(hyp_rna_arr['Ave.'], errors='coerce')
hyp_rna_arr = hyp_rna_arr.dropna(how = 'any',axis=0) #Remove genes where data is missing.
def RV_to_Rv(x):
# Converts the format of the Rv numbers so that merge will work.
x = x[0] + x[1].lower() + x[2:]
x = x[0:-1] + x[-1].lower()
return x
hyp_rna_arr['Rv#'] = hyp_rna_arr['Rv#'].apply(RV_to_Rv)
hyp_rna_arr['log2FC_hyp'] = hyp_rna_arr['Ave.'].apply(lambda x: math.log2(x))
hyp_rna_arr = hyp_rna_arr.merge(ncbi_desc,how='left',on='Rv#')
# Read in a format Betts PBS data
pbs_rna_arr_up = pd.read_excel('Downloads/MMI_2779_sm_sup.xlsx',sheetname='RESUP',header=0, skip_footer = 0, parse_cols = [0,1,3,6])
pbs_rna_arr_down = pd.read_excel('Downloads/MMI_2779_sm_sup.xlsx',sheetname='RESDOWN',header=0, skip_footer = 0, parse_cols = [0,1,3,6])
pbs_rna_arr = pd.concat([pbs_rna_arr_up,pbs_rna_arr_down])
pbs_rna_arr = pbs_rna_arr[pbs_rna_arr['Time'] == 't3'].drop(['Time'],axis=1)
pbs_rna_arr = pbs_rna_arr.rename(columns = {'Gene':'Rv#', 'P-value':'pval', 'Log ratio':'log2FC_pbs'})
pbs_rna_arr['log2FC_pbs'] = pbs_rna_arr['log2FC_pbs'].apply(lambda x: x*(math.log(10,2))) #Convert to base 2.
pbs_rna_arr['pval'].loc[(pbs_rna_arr['pval'] == '<.000001')] = '0.000001' # This line produces a warning but appears to work as expected.
pbs_rna_arr['pval'] = pd.to_numeric(pbs_rna_arr['pval'])
pbs_rna_arr = pbs_rna_arr.merge(ncbi_desc,how='left',on='Rv#')
# Call each gene from microarray data as UP = 1, DOWN = -1, NOCALL = 0.
hyp_rna_arr['rna_arr_data'] = 0
hyp_rna_arr['rna_arr_data'].loc[(hyp_rna_arr['Ave.'] > 1.6)] = 1 #upregulated
hyp_rna_arr['rna_arr_data'].loc[(hyp_rna_arr['Ave.'] < 1/1.6)] = -1 #downregulated
hyp_rna_arr = hyp_rna_arr.set_index('Rv#')[['rna_arr_data','log2FC_hyp','Description']]
pbs_rna_arr['rna_arr_data'] = 0
pbs_rna_arr['rna_arr_data'].loc[(pbs_rna_arr['log2FC_pbs'] > 1) & (pbs_rna_arr['pval'] < .001)] = 1 #upregulated
pbs_rna_arr['rna_arr_data'].loc[(pbs_rna_arr['log2FC_pbs'] < -1) & (pbs_rna_arr['pval'] < .001)] = -1 #downregulated
pbs_rna_arr = pbs_rna_arr.set_index('Rv#')[['rna_arr_data','log2FC_pbs','Description']]
both_rna_arr = hyp_rna_arr.merge(pbs_rna_arr.drop(['Description'],axis=1),how='outer',left_index=True,right_index=True) #Note: This puts nans for any gene not appearing in both datasets. Betts only included ~3000 genes in the published dataset. The reason for the missing genes is unknown - it could be that they failed QC.
both_rna_arr['rna_arr_data'] = 0
both_rna_arr.loc[(both_rna_arr['rna_arr_data_x'] > 0) & (both_rna_arr['rna_arr_data_y'] > 0), 'rna_arr_data'] = 1
both_rna_arr.loc[(both_rna_arr['rna_arr_data_x'] < 0) & (both_rna_arr['rna_arr_data_y'] < 0), 'rna_arr_data'] = -1
both_rna_arr = both_rna_arr[['rna_arr_data','log2FC_hyp','log2FC_pbs','Description']]
# scores_df,cont_tables_ls = mh.find_enriched_regs(tfoe_call_df,both_rna_arr,num_MC_samp,alt)
scores_hyp_df,cont_hyp_ls = mh.find_enriched_regs(tfoe_call_df,hyp_rna_arr,num_MC_samp,alt)
scores_pbs_df,cont_pbs_ls = mh.find_enriched_regs(tfoe_call_df,pbs_rna_arr,num_MC_samp,alt)
if 1:
#Write individual tf scores (and p-values) to file
# with open('Analysis_Output/lit_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_hyp+pbs.csv', 'w') as fp:
# scores_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_hyp','log2FC_pbs','Description']].to_csv(fp)
#For hyp and pbs individually:
with open('Analysis_Output/lit_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_hyp.csv', 'w') as fp:
scores_hyp_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_hyp','Description']].to_csv(fp)
with open('Analysis_Output/lit_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_pbs.csv', 'w') as fp:
scores_pbs_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_pbs','Description']].to_csv(fp)
if 1:
#Write confusion matrices for TFs out to file
# writer = pd.ExcelWriter('Analysis_Output/lit_confusion_matrices_tf_hyp+pbs.xlsx')
# for x in cont_tables_ls:
# if isinstance(x[0],pd.DataFrame):
# x[0].to_excel(writer, sheet_name=x[1])
# writer.save()
# Write out confusion matrices for hyp, pbs individually.
writer = pd.ExcelWriter('Analysis_Output/lit_confusion_matrices_tf_hyp_only.xlsx')
for x in cont_hyp_ls:
if isinstance(x[0],pd.DataFrame):
x[0].to_excel(writer, sheet_name=x[1])
writer.save()
writer = pd.ExcelWriter('Analysis_Output/lit_confusion_matrices_tf_pbs_only.xlsx')
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
import quantopy as qp
@pytest.fixture(autouse=True)
def random():
np.random.seed(0)
class TestReturnSeries:
def test_from_price(self):
expected = [0.0625, 0.058824]
rs = qp.ReturnSeries.from_price([80, 85, 90])
assert type(rs) is qp.ReturnSeries
assert_allclose(
qp.ReturnSeries.from_price([80, 85, 90]),
expected,
rtol=1e-1,
)
assert_allclose(
qp.ReturnSeries.from_price(np.array([80, 85, 90])),
expected,
rtol=1e-1,
)
assert_allclose(
qp.ReturnSeries.from_price(pd.Series([80, 85, 90])),
expected,
rtol=1e-1,
)
assert_allclose(
qp.ReturnSeries.from_price([80]),
[],
rtol=1e-1,
)
assert_allclose(
qp.ReturnSeries.from_price([]),
[],
rtol=1e-1,
)
def test_cumulated(self) -> None:
val = qp.ReturnSeries([0.062500, 0.058824]).cumulated()
assert type(val) is qp.ReturnSeries
assert_allclose(
val,
[1.0625, 1.1250],
rtol=1e-1,
)
assert_allclose(
qp.ReturnSeries([0.500000, 0.333333]).cumulated(),
[1.5, 2.0],
rtol=1e-1,
)
assert_allclose(
qp.ReturnSeries([0.5]).cumulated(),
[1.5],
rtol=1e-1,
)
assert_allclose(
qp.ReturnSeries([], dtype="float64").cumulated(),
[],
rtol=1e-1,
)
def test_mean(self) -> None:
arithmetic_mean = qp.ReturnSeries([0.3, 0.25, 0.09, 0.1, 0.23]).mean()
assert type(arithmetic_mean) is np.float64
assert_allclose(
arithmetic_mean,
0.194,
rtol=1e-1,
)
def test_gmean(self):
rs = qp.ReturnSeries([0.9, 0.1, 0.2, 0.3, -0.9])
assert_allclose(rs.gmean(), -0.200802, rtol=1e-5)
assert type(rs.gmean()) is np.float64
def test_manipulations(self):
rs = qp.ReturnSeries([1, 2, 3])
assert type(rs) is qp.ReturnSeries
to_framed = rs.to_frame()
assert type(to_framed) is qp.ReturnDataFrame
sliced1 = rs[:2]
assert type(sliced1) is qp.ReturnSeries
def test_sharpe_ratio(self) -> None:
# Data from https://en.wikipedia.org/wiki/Sharpe_ratio
mu = (1 + 0.25) ** (1 / 12) - 1 # monthly
sigma = (1 + 0.1) ** (1 / 12) - 1 # monthly
riskfree_rate = 0.1 # yearly
rs = qp.random.generator.returns(mu, sigma, 4000)
periodicity = qp.stats.period.MONTHLY
rs_sharpe_ratio = rs.sharpe_ratio(riskfree_rate, periodicity)
expected = (rs.annualized(periodicity) - riskfree_rate) / rs.effect_vol(
periodicity
)
assert_allclose(rs_sharpe_ratio, expected, rtol=1e-2)
assert type(rs_sharpe_ratio) is np.float64
def test_return_series(self) -> None:
rs = qp.random.generator.returns(0.01, 0.1, 100)
rs_drawdown = rs.drawdown()
assert type(rs_drawdown) is qp.ReturnSeries
# Compute expected value
wealth_index = (rs + 1).cumprod() # type: ignore
previous_peaks = wealth_index.cummax()
expected = (wealth_index - previous_peaks) / previous_peaks
assert_allclose(rs_drawdown, expected, rtol=1e-2)
def test_annualized(self):
mu = 0.03 # mean
sigma = 0.01 # standard deviation
rs = qp.random.generator.returns(mu, sigma, 1000)
expected = (mu + 1) ** 12 - 1
effect = rs.annualized(qp.stats.period.MONTHLY)
assert type(effect) is np.float64
assert_allclose(
effect,
expected,
rtol=1e-1,
)
def test_effect_vol(self):
mu = 0.01 # mean
sigma = 0.01 # standard deviation
rs = qp.random.generator.returns(mu, sigma, 1000)
expected = sigma * np.sqrt(252)
effect = rs.effect_vol(qp.stats.period.DAILY)
assert type(effect) is np.float64
assert_allclose(
effect,
expected,
rtol=1e-1,
)
def test_total_return(self):
ps = pd.Series([8.7, 8.91, 8.71, 8.43, 8.73])
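        # --- Hedged continuation sketch (the test truncates here): the expected total
        # return is the overall price relative; only from_price is assumed from the
        # quantopy API, the rest is plain pandas arithmetic.
        rs = qp.ReturnSeries.from_price(ps)
        expected = ps.iloc[-1] / ps.iloc[0] - 1
        assert_allclose((rs + 1).prod() - 1, expected, rtol=1e-6)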
#!/usr/bin/env python
# encoding:utf-8
'''sklearn doc
'''
import re
import os
import sys
import numpy as np
import pandas as pd
from time import time
from sklearn.model_selection import GridSearchCV, cross_val_predict
# RandomizedSearchCV cross_val_score train_test_split
from skfeature.function.information_theoretical_based import MRMR
from imblearn.over_sampling import SMOTE
# from sklearn.feature_selection import SelectKBest, mutual_info_classif, mutual_info_regression,f_classif
# from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from joblib import Memory, dump, load
from sklearn import metrics
from pycm import * #swiss-army knife of confusion matrice
from collections import Counter
# from sklearn.base import BaseEstimator,TransformerMixin
# from imblearn.metrics import classification_report_imbalanced
import utils
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('agg')  # non-interactive backend, avoids UserWarning on headless systems
from plotnine import * #ggplot
#Global variables
mem = Memory("./mycache") #A context object for caching a function's return value each time it is called with the same input arguments.
import itertools
# COLORS = 'bgrcmyk' #blue green red itertools.cycle(cmap.colors))
# cmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',
# 'Dark2', 'Set1', 'Set2', 'Set3',
# 'tab10', 'tab20', 'tab20b', 'tab20c']
cmap = plt.get_cmap('Paired')
COLORS = cmap.colors
from sklearn_pipeline_config import * #SCALERS, Tree_based_CLASSIFIERS, Other_CLASSIFIERS RANDOM_STATE
All_CLASSIFIERS = Tree_based_CLASSIFIERS + Other_CLASSIFIERS
######################## pipeline functions ###################
def plot_tsne(df, Y=None, targets=None, filename='decomposition'):
"""to be fihished
method= ['tsne', 'pca', 'tsvd']
t-SNE has a cost function that is not convex, i.e. with different initializations we can get different results
PCA for dense data or
TruncatedSVD for sparse data
ไฝTSVD็ดๆฅไฝฟ็จscipy.sparse็ฉ้ต๏ผไธ้่ฆdensifyๆไฝ๏ผๆไปฅๆจ่ไฝฟ็จTSVD่ไธๆฏPCA
"""
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, TruncatedSVD
n_components = min(df.shape) if min(df.shape) <10 else 10
X = TSNE(random_state=RANDOM_STATE, learning_rate=100, n_components=2).fit_transform(df)
pd.DataFrame(X).to_csv(filename + ".tSNE.csv")
fig = plt.figure(figsize=(10, 6))
for c, i, target_name in zip('rgb', np.unique(Y), targets):
plt.scatter(X[Y==i, 0], X[Y==i, 1], c=c, label=target_name)
plt.xlabel('tSNE-1')
plt.ylabel('tSNE-2')
plt.title('tSNE')
plt.legend()
fig.savefig(filename + ".tSNE.svg")
#pca
pca = PCA(random_state=RANDOM_STATE, n_components=n_components)
pca.fit(df)
X = pca.transform(df)
pd.DataFrame(X).to_csv(filename + ".pca.csv")
fig = plt.figure(figsize=(10, 6))
for c, i, target_name in zip('rgb', np.unique(Y), targets):
plt.scatter(X[Y==i, 0], X[Y==i, 1], c=c, label=target_name)
p1,p2=pca.explained_variance_ratio_[:2]
plt.xlabel('PCA-1 explained variance ratio: ' + '{:.2f}%'.format(p1))
plt.ylabel('PCA-2 explained variance ratio: ' + '{:.2f}%'.format(p2))
plt.title('PCA')
plt.legend()
# print("singular_values: ", pca.singular_values_)
fig.savefig(filename + ".pca.svg")
#tSVD
tsvd=TruncatedSVD(random_state=RANDOM_STATE, n_components=n_components)
tsvd.fit(df)
X = tsvd.transform(df)
pd.DataFrame(X).to_csv(filename + ".tSVD.csv")
fig = plt.figure(figsize=(10, 6))
for c, i, target_name in zip('rgb', np.unique(Y), targets):
plt.scatter(X[Y==i, 0], X[Y==i, 1], c=c, label=target_name)
p1,p2=tsvd.explained_variance_ratio_[:2]
plt.xlabel('tSVD-1 explained variance ratio: ' + '{:.2f}%'.format(p1))
plt.ylabel('tSVD-2 explained variance ratio: ' + '{:.2f}%'.format(p2))
plt.title('tSVD')
plt.legend()
fig.savefig(filename + ".tSVD.svg")
@mem.cache
def get_data(X_file, y_file):
"""features matrix and metadata group.mf with header and index_col,transform to relative abundance matrix"""
if X_file.endswith("csv"):
X = pd.read_csv(X_file, index_col=0, header=0) # rows =samples ,columns=genes(features)
else:
X = pd.read_csv(X_file, index_col=0, header=0,sep="\t")
if y_file.endswith("csv"):
y = pd.read_csv(y_file, index_col=0, header=0)
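    # --- Hedged continuation sketch (the excerpt truncates above): a tab-separated
    # branch mirroring the X branch, then row-normalisation to the relative abundance
    # matrix mentioned in the docstring. The return signature is an assumption.
    else:
        y = pd.read_csv(y_file, index_col=0, header=0, sep="\t")
    X = X.div(X.sum(axis=1), axis=0)
    return X, y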
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
READ IN:
1) <NAME> Data "../../../AKJ_Replication/Replication/data/data_replication.csv"
2) Alternative data "../output/alternativedata.csv"
EXPORT:
"../output/alternativedata.csv"
@author: olivergiesecke
"""
import pandas as pd
import numpy as np
import os
import re
ref_df = pd.read_csv("../output/alternativedata.csv")
ref_df["start_date"] = | pd.to_datetime(ref_df["start_date"]) | pandas.to_datetime |
__author__ = "saeedamen" # <NAME>
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on a "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import pandas as pd
from findatapy.market.ioengine import IOEngine
from findatapy.util.dataconstants import DataConstants
data_constants = DataConstants()
redis_server = data_constants.db_cache_server
redis_port = data_constants.db_cache_port
def test_redis_caching():
# Note: you need to install Redis in order for this to work!
# read CSV from disk, and make sure to parse dates
df = pd.read_csv("S&P500.csv", parse_dates=['Date'], index_col=['Date'])
df.index = pd.to_datetime(df.index)
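    # --- Hedged continuation sketch (comments only): the findatapy IOEngine is
    # typically used to round-trip the frame through Redis; the exact keyword names
    # are assumptions, so this is left as an illustrative comment.
    #   io = IOEngine()
    #   io.write_time_series_cache_to_disk('test_key', df, engine='redis',
    #                                      db_server=redis_server, db_port=redis_port)
    #   df_out = io.read_time_series_cache_from_disk('test_key', engine='redis',
    #                                                db_server=redis_server, db_port=redis_port)
    #   pd.testing.assert_frame_equal(df, df_out)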
# coding:utf-8
import sys
import numpy as np
import pandas as pd
import pendulum
import pyltr
from flasgger import Swagger
from flask import Flask, jsonify, render_template, request
from pymongo import MongoClient, DESCENDING
from sklearn.externals import joblib
import config
app = Flask(__name__)
swagger = Swagger(app)
client = MongoClient('mongodb://%s:%s@%s' % (config.mongo_username,
config.mongo_password,
config.mongo_host))
SCORE_DB = client['soma_2018']
score_history_db = SCORE_DB['score_history']
eval_qid_gold_list_dict = joblib.load(config.eval_gold_file_path)
test160_qid_gold_list_dict = joblib.load(config.test160_gold_file_path)
test500_qid_gold_list_dict = joblib.load(config.test500_gold_file_path)
qid_gold_list_dict = {'eval': eval_qid_gold_list_dict,
'test160': test160_qid_gold_list_dict,
'test500': test500_qid_gold_list_dict}
def add_score_data(name, nickname, score, email, mode, ip, day):
'''
์ ์ ๊ธฐ๋ก์ score_history ์ score ์ ์ถ๊ฐํ๋ค
๋จ score ์ ์ถ๊ฐํ ๋์๋ ์ ์๊ฐ ๋ ์ข์๋๋ง ์ถ๊ฐํ๋ค.
@param name: ์ค๋ช
@param nickname: ์ ์ํ ํ์์ฉ ์ด๋ฆ
@param score: ๊ฒ์ ์ฑ๋ฅ ์ ์
@param email: email
@return:
'''
score_history_db.insert_many([{'nickname': nickname, 'name': name, 'score': score, 'date': pendulum.now(),
'email': email, 'mode': mode, 'ip': ip, 'day': day}])
prev_score_info = SCORE_DB[mode + "_score"].find_one({'nickname': nickname})
if prev_score_info:
if prev_score_info['score'] > score:
'''
The newly submitted score is lower than the previous best record,
so the stored score is not updated.
'''
return False
SCORE_DB[mode + "_score"].update_one({'nickname': nickname},
{'$set': {'name': name, 'nickname': nickname, 'score': score,
'date': pendulum.now(), 'email': email}}, upsert=True)
return True
@app.route("/", methods=['POST'])
def evaluation():
'''
Returns the NDCG evaluation result for pred_result.
Requests to the evaluation server can be made as shown below.
mode can be one of eval, test160, test500. eval is used for parameter tuning; the parameters
optimized there are then used to evaluate final performance on test160/test500.
test160 is limited to one request per IP per day.
r = requests.post('http://eval.buzzni.net:31000', json={"pred_result": system_result_dict})
print(r.json())
name = '<NAME>'
nickname = 'gil-dong'
email = 'email'
mode = 'test160'
r = requests.post('http://eval.buzzni.net:31000', json={"pred_result": system_result_dict,'name':name, 'nickname':nickname, 'mode':mode,'email':email})
print (r.json())
system_result_dict contains, for each query_id, the list of the 20 image ids most similar to that query image.
The image ids must be ordered from most to least similar.
{'1': ['57aaa9e3efd3e84d8d906cd95c6da6a9.jpg',
'd6c86e7ae73003d8c848baa0a95720f9.jpg' .. ]
'10': ['2b2ec56ebd933b0dbb3818d17244519b.jpg',
'9f27802f1a4dba04774ab9bc252bd122.jpg'..] .. }
---
parameters:
- name: pred_result
in: prediction
type: query
required: true
- name: name
in: query
type: string
required: true
- name: nickname
in: query
type: string
required: true
- name: email
in: query
type: string
required: true
- name: mode
in: query
type: string
required: true
responses:
200:
description: prediction score
examples:
{score: 0.2}
'''
system_result_dict = request.get_json()['pred_result']
name = request.get_json().get('name', '')
nickname = request.get_json().get('nickname', '')
email = request.get_json().get('email', '')
mode = request.get_json().get('mode', 'eval')
ip = request.remote_addr
now_day = pendulum.now().format('%Y%m%d')
if not name:
return jsonify({'msg': 'name parameter required! - http://eval.buzzni.net:31000/apidocs/#/default/post_'})
if not nickname:
return jsonify({'msg': 'nickname parameter required! - http://eval.buzzni.net:31000/apidocs/#/default/post_'})
if mode not in ['eval', 'test160', 'test500']:
return jsonify({'msg': 'mode value (eval or test) parameter required! - http://eval.buzzni.net:31000/apidocs/#/default/post_'})
if not email:
return jsonify({'msg': 'email parameter required! - http://eval.buzzni.net:31000/apidocs/#/default/post_'})
mode_size_dict = {'eval': 160, 'test160': 160, 'test500': 495}
if len(system_result_dict.keys()) != mode_size_dict[mode]:
return jsonify({'msg': 'expected result num is %d, but current result num is %d' % (mode_size_dict[mode], len(system_result_dict.keys()))})
if mode in ['test160']:
prev_history = score_history_db.find_one({'mode': mode, 'ip': ip, 'day': now_day})
if prev_history:
return jsonify({'msg': 'you can submit only one result in a day'})
search_gold_y_list = []
search_system_y_list = []
search_qid_list = []
for qid in qid_gold_list_dict[mode].keys():
system_key_score_dict = {}
for idx, k in enumerate(system_result_dict[qid]):
'''
A higher score is given within the top 3 (as below) to reward matches
that appear near the top of the search results.
'''
if idx < 3:
system_key_score_dict[k] = 2
elif idx < len(qid_gold_list_dict[mode][qid]) / 2.0:
'''
The number of gold images varies per query, hence the per-query threshold above.
'''
system_key_score_dict[k] = 1.5
elif idx < len(qid_gold_list_dict[mode][qid]):
system_key_score_dict[k] = 1
key_gold_score_dict = {}
for key in qid_gold_list_dict[mode][qid]:
key_gold_score_dict[key] = 1
max_limit = np.min([len(qid_gold_list_dict[mode][qid]), 10])
# for key in list(system_key_score_dict.keys())[:len(qid_gold_list_dict[mode][qid])]:
for key in list(system_key_score_dict.keys())[:max_limit]:
search_qid_list.append(qid)
search_gold_y_list.append(key_gold_score_dict.get(key, 0))
search_system_y_list.append(system_key_score_dict.get(key))
# the metric must be re-created for every evaluation
metric = pyltr.metrics.NDCG(k=10)
score = metric.calc_mean(search_qid_list, np.asarray(search_gold_y_list), np.asarray(search_system_y_list))
if score > 1:
return jsonify({'msg': 'invalid score ' + str(score)})
print('score:', score)
result = {'score': score}
add_score_data(name=name, nickname=nickname, score=score, email=email, mode=mode, ip=ip, day=now_day)
return jsonify(result)
@app.route("/leader_board")
def show_leader_board():
filter_mode = request.args.get("mode", '')
score_data_list = []
titles = ['na']
for mode in ['test160', 'eval', 'test500']:
if filter_mode:
if mode != filter_mode:
continue
score_list = []
rank = 1
for each in SCORE_DB[mode + "_score"].find().sort([("score", DESCENDING)]).limit(300):
score_list.append({'rank': rank, 'name': each['nickname'], 'score': each['score']})
rank += 1
if not score_list:
continue
data = pd.DataFrame(score_list)
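        # --- Hedged continuation sketch (the excerpt truncates here): a common
        # Flask + pandas pattern renders each mode's table via to_html. The template
        # name and context variable names are assumptions.
        score_data_list.append(data.to_html(index=False))
        titles.append(mode)
    return render_template('leader_board.html', tables=score_data_list, titles=titles)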
from typing import NoReturn, Tuple, Any, Union, Optional, List
from copy import deepcopy, copy
from warnings import warn
from darts import TimeSeries as DartsTimeSeries
import numpy as np
from pandas import DataFrame, date_range, infer_freq, Series, DatetimeIndex, \
Timestamp, Timedelta, concat
from timeatlas.abstract import (
AbstractBaseTimeSeries,
AbstractOutputText,
AbstractOutputPickle
)
from timeatlas.config.constants import (
COMPONENT_VALUES,
TIME_SERIES_FILENAME,
TIME_SERIES_EXT,
METADATA_FILENAME,
METADATA_EXT
)
from timeatlas.metadata import Metadata
from timeatlas.processors.scaler import Scaler
from timeatlas.plots.time_series import line_plot
from timeatlas.utils import ensure_dir, to_pickle
from timeatlas.time_series.component import Component
from timeatlas.time_series.component_handler import ComponentHandler
class TimeSeries(AbstractBaseTimeSeries, AbstractOutputText, AbstractOutputPickle):
"""
A TimeSeries object is a series of time indexed values.
"""
"""Defines a time series
A TimeSeries object is a series of time indexed values.
Attributes:
series: An optional Pandas DataFrame
metadata: An optional Dict storing metadata about this TimeSeries
"""
def __init__(self,
data: DataFrame = None,
handler: ComponentHandler = None):
"""Defines a time series
A TimeSeries object is a series of time indexed values.
Args:
data: DataFrame containing the values and labels
handler: ComponentHandler
"""
if data is not None:
# Perform preliminary checks
# --------------------------
assert isinstance(data, DataFrame), \
'data must be of a DataFrame.'
assert isinstance(data.index, DatetimeIndex), \
'Values must be indexed with a DatetimeIndex.'
assert len(data) >= 1, \
'Values must have at least one values.'
assert len(data.columns) >= 1, \
"DataFrame must have at least one column."
# Create the TimeSeries object
# ----------------------------
# Create the components handler
if handler is None:
self._handler = ComponentHandler()
for col in data.columns:
component = Component(col)
self._handler.append(component)
else:
self._handler = handler
# Rename the columns
data.columns = self._handler.get_columns()
# Store the data with certainty that values are sorted
self._data = data.sort_index()
# Add the freq if regular
if len(data) >= 3:
self._data.index.freq = infer_freq(self._data.index)
# Create instance variables
self.index = self._data.index # index accessor
self.values = self._data[
self._handler.get_columns().to_list()
]
else:
# Create empty structures
self._data = DataFrame()
self._handler = ComponentHandler()
def __repr__(self):
return self._data.__repr__()
def __len__(self):
return len(self._data)
def __iter__(self):
return (v for i, v in self._data.iterrows())
def __getitem__(self, item: Union[int, str, Timestamp,
slice,
List[int], List[str]]):
# ts[0] -> select rows
if isinstance(item, int):
new_handler = self._handler
new_data = self._data.iloc[[item]]
# ts["0_foo"] -> select columns
elif isinstance(item, str):
new_handler = self._handler[item]
new_data = self._data.loc[:, new_handler.get_columns()]
# ts[my_timestamp] -> select rows
elif isinstance(item, Timestamp):
new_handler = self._handler
new_data = self._data.loc[[item]]
elif isinstance(item, slice):
# ts[0:4] -> select rows
if isinstance(item.start, int) or isinstance(item.stop, int):
new_handler = self._handler
new_data = self._data.iloc[item]
# ts["2013":"2014"] -> select rows
elif isinstance(item.start, str) or isinstance(item.stop, str):
new_handler = self._handler
new_data = self._data.loc[item]
else:
raise KeyError(f"rows can't be sliced with type {type(item)}")
elif isinstance(item, list):
# ts[[0,3,5]] -> select columns
if all(isinstance(i, int) for i in item):
new_handler = self._handler[item]
new_data = self._data.iloc[:, item]
# ts[["a",... ,"b"]] -> select columns
elif all(isinstance(i, str) for i in item):
new_handler = self._handler[item]
new_data = self._data.loc[:, item]
else:
raise TypeError(f"TimeSeries can't be selected with list of "
f"type {type(item)}")
else:
raise TypeError(f"TimeSeries can't be selected with type "
f"{type(item)}")
return TimeSeries(new_data, new_handler)
# ==========================================================================
# Methods
# ==========================================================================
@staticmethod
def create(start: str, end: str,
freq: Union[str, 'TimeSeries'] = None) \
-> 'TimeSeries':
"""Creates an empty TimeSeries object with the period as index
Args:
start: str of the start of the DatetimeIndex (as in Pandas.date_range())
end: the end of the DatetimeIndex (as in Pandas.date_range())
freq: the optional frequency it can be a str or a TimeSeries (to copy its frequency)
Returns:
TimeSeries
"""
if freq is not None:
if isinstance(freq, TimeSeries):
freq = infer_freq(freq._data.index)
elif isinstance(freq, str):
freq = freq
data = DataFrame(columns=[COMPONENT_VALUES],
index=date_range(start, end, freq=freq))
return TimeSeries(data)
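    # Usage sketch (the frequency alias is an assumption):
    #   ts = TimeSeries.create("2020-01-01", "2020-01-07", freq="1H").fill(0.0)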
def stack(self, ts: 'TimeSeries'):
""" Stack two TimeSeries together
Create a unique TimeSeries from two TimeSeries so that the resulting
TimeSeries has the component(s) from self and ts.
Args:
ts: the TimeSeries to stack
Returns:
TimeSeries
"""
assert (self.index == ts.index).all(), \
"Indexes aren't the same"
new_data = concat([self._data, ts._data], axis=1)
new_components = self._handler.copy()
for c in ts._handler.components:
new_components.append(c)
return TimeSeries(new_data, new_components)
def drop(self, key: Union[int, str]) -> 'TimeSeries':
""" Drop a component of a TimeSeries by its index
Args:
key: int or str of the component to delete
Returns:
TimeSeries
"""
# Given the current state of the TimeSeries, get the name of the columns
# that will compose the new_data
all_cols = self._handler.get_columns().to_list()
if isinstance(key, int):
cols_to_remove = self._handler.get_column_by_id(key).to_list()
elif isinstance(key, str):
cols_to_remove = self._handler.get_column_by_name(key).to_list()
else:
raise TypeError(f"key must be int or str, not {type(key)}")
new_cols = self.__list_diff(all_cols, cols_to_remove)
# select only the leftover data from self.data
new_data = self._data.copy()
new_data = new_data[new_cols]
# drop the component to get rid off
new_handler = self._handler.copy()
del new_handler[key]
return TimeSeries(new_data, new_handler)
def plot(self, *args, **kwargs) -> Any:
"""Plot a TimeSeries
Returns:
plotly.graph_objects.Figure
"""
return line_plot(self, *args, **kwargs)
def copy(self, deep=False) -> 'TimeSeries':
"""Copy a TimeSeries
Copy the TSD to either a deep or shallow copy of itself
Args:
deep: if True, creates a deep copy else a shallow one
Returns: (deep) copy of TimeSeries
"""
return deepcopy(self) if deep else copy(self)
def split_at(self, timestamp: Union[str, Timestamp]) \
-> Tuple['TimeSeries', 'TimeSeries']:
"""Split a TimeSeries at a defined point and include the splitting point
in both as in [start,...,at] and [at,...,end].
Args:
timestamp: str or Timestamp where to the TimeSeries will be split
(e.g. "2019-12-31 00:00:00")
Returns:
a Tuple of TimeSeries ([start,...,at] and [at,...,end])
"""
start = self._data.index[0]
end = self._data.index[-1]
first_split = self._data[start:timestamp].copy()
second_split = self._data[timestamp:end].copy()
before = TimeSeries(first_split, self._handler)
after = TimeSeries(second_split, self._handler)
return before, after
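    # Usage sketch: before, after = ts.split_at("2020-01-03 00:00:00")
    # (the split timestamp is included in both halves, as documented above)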
def split_in_chunks(self, n: int) -> List['TimeSeries']:
"""Split a TimeSeries into chunks of length n
When the number of element in the TimeSeries is not a multiple of n, the
last chunk will have a length smaller than n.
Args:
n: length of the chunks
Returns:
List of TimeSeries
"""
ts_chunks = [TimeSeries(data=v, handler=self._handler) for n, v in
self._data.groupby(np.arange(len(self._data)) // n)]
return ts_chunks
def sliding(self, size: int, step: int = 1) -> List['TimeSeries']:
"""
Creates windows of the TimeSeries. If size > step the windows will be overlapping.
Args:
size: size of the window
step: step size between windows
Returns: List of TimeSeries
"""
if size < step:
warn(
f"Windows size ({size}) is bigger than step size ({step}). The resulting data will jump over some values.")
_rolling_data = [TimeSeries(v, handler=self._handler) for v in self._data.rolling(size) if len(v) == size]
return _rolling_data[::step]
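    # Usage sketch (hourly data assumed): ts.sliding(size=24, step=24) yields
    # non-overlapping one-day windows; a step smaller than size makes them overlap.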
def fill(self, value: Any) -> 'TimeSeries':
"""Fill a TimeSeries with values
Fill a TimeSeries with a value. If given a unique value, all values
will be broadcast. If given an array of the length of the TimeSeries,
it will replace all values.
Args:
value: Any values that you want to fill the TimeSeries with
Returns:
TimeSeries
"""
new_data = self.copy(deep=True)
new_data._data[:] = value
return new_data
def empty(self) -> 'TimeSeries':
"""Empty the TimeSeries (fill all values with NaNs)
Replace all values of a TimeSeries with NaNs
Returns:
TimeSeries
"""
return self.fill(np.nan)
def pad(self, limit: Union[int, str, Timestamp], side: Optional[str] = None, value: Any = np.NaN) -> 'TimeSeries':
"""Pad a TimeSeries until a given limit
Padding a TimeSeries on left or right sides.
Args:
limit: int, str or Pandas Timestamp
if int, it will pad the side given in the side arguments by n
elements.
side: Optional[str]
side to which the TimeSeries will be padded. This arg can have
two value: "before" and "after" depending where the padding is
needed.
This arg is needed only in case the limit is given in int.
value: Any values
Returns:
TimeSeries
"""
def create_pad(new_limit, ts, fill_val):
"""
Local utility function to create padding time series from a Pandas
Timestamp for a given TimeSeries
Args:
new_limit: Pandas Timestamp of the new limit to pad from/to
ts: TimeSeries to pad
fill_val: value to fill the TimeSeries with
Returns:
TimeSeries
"""
if new_limit < ts.start():
return ts.create(new_limit, ts.start(), freq=ts) \
.fill(fill_val)[:-1]
elif new_limit > ts.end():
return ts.create(ts.end(), new_limit, freq=ts) \
.fill(fill_val)[1:]
if new_limit == ts.start() or new_limit == ts.end():
return TimeSeries()
else:
raise ValueError("The given limit is included in the time "
"series, padding is impossible")
# Create padding TimeSeries from a given number of elements to pad with
if isinstance(limit, int):
# Add 1 so the resulting date range is not one element too short
target_limit = limit + 1
if side == "before":
index = date_range(end=self.start(), freq=self.frequency(),
periods=target_limit, closed="left")
elif side == "after":
index = date_range(start=self.end(), freq=self.frequency(),
periods=target_limit, closed="right")
else:
raise ValueError("side argument isn't valid")
values = [value] * len(index)
df = DataFrame(index=index, data=values)
pad = TimeSeries(df)
# Create padding TimeSeries from time stamp as str
if isinstance(limit, str):
target_limit = Timestamp(limit)
#!/usr/bin/env python
"""get_map_grid_data.py: module is dedicated to fetch map2, mapex, grid2, grd, gridex data from files."""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import os
import numpy as np
import scipy
import pandas as pd
import datetime as dt
import glob
import bz2
import gzip
import pydarn
import pydarnio
import configparser
import shutil
import xarray
from plotMapGrd import MapPlot
import multiprocessing as mp
from functools import partial
class FetchMap(object):
"""
Fetch map level data [map, mapex, cnvmap]
"""
def __init__(self, dates, hemi, file_type="map2",
_filestr="/sd-data/{year}/{file_type}/{hemi}/{date}.{hemi}.{file_type}.bz2",
radEarth=6371.0, lenFactor=500.):
self.dates = dates
self.hemi = hemi
self.file_type = file_type
self._filestr = _filestr
# set up some initial parameters
self.radEarth = radEarth
# This is used to change the length of the vector on the plot
self.lenFactor = lenFactor
self.radEarthMtrs = self.radEarth * 1000.0
self.records = None
return
def fetch_map_files(self):
"""
Read mapex and map2 files
"""
self.files = []
for d in self.dates:
f = self._filestr.format(year=d.year, hemi=self.hemi,file_type=self.file_type,
date=d.strftime("%Y%m%d"))
fs = glob.glob(f)
if len(fs) > 0: self.files.append(fs[0])
else: print(f" File not exists, {f}!")
return
def fetch_cnvmap_files(self):
"""
Read and copy cnvmaps
"""
if not os.path.exists("raw/"): os.system("mkdir raw/")
self.files = []
for d in self.dates:
f = self._filestr.format(year=d.year, hemi=self.hemi,file_type=self.file_type,
date=d.strftime("%Y%m%d"))
fs = glob.glob(f)
if len(fs) > 0:
f = fs[0]
shutil.copy(f, "raw/")
dest = "raw/" + f.split("/")[-1]
self.files.append(dest.replace(".bz2", ""))
os.system("bzip2 -d " + dest)
else: print(f" File not exists, {f}!")
return
def fetch_records(self):
if self.records == None:
self.records = []
for f in self.files:
if ("cnvmap" in f) or ("mapex" in f):
reader = pydarn.SuperDARNRead()
recs = reader.read_dmap(f)
else:
with bz2.open(f) as fp: ds = fp.read()
reader = pydarnio.SDarnRead(ds, True)
recs = reader.read_map()
self.records.extend(recs)
if self.file_type == "cnvmap": os.system("rm -rf raw/*")
return self.records
def get_grids(self, start, end, summary=[], records=[]):
"""
Fetch gridex, grid2 content
"""
print(" Fetch grid records.")
self.summ, self.reco = pd.DataFrame(), pd.DataFrame()
grids = self.fetch_records()
for r in grids:
stime = dt.datetime(r["start.year"], r["start.month"], r["start.day"], r["start.hour"],
r["start.minute"], int(r["start.second"]))
etime = dt.datetime(r["end.year"], r["end.month"], r["end.day"], r["end.hour"],
r["end.minute"], int(r["end.second"]))
o = pd.DataFrame(r, columns=summary)
o["stime"], o["etime"] = stime, etime
self.summ = pd.concat([self.summ, o])
if "vector.mlat" in r:
o = pd.DataFrame(r, columns=records)
o["stime"], o["etime"] = stime, etime
self.reco = pd.concat([self.reco, o])
self.summ = self.summ.reset_index().drop(columns=["index"])
self.reco = self.reco.reset_index().drop(columns=["index"])
self.summ = self.summ[(self.summ.stime>=start) & (self.summ.stime<=end)]
self.reco = self.reco[(self.reco.stime>=start) & (self.reco.stime<=end)]
return self.summ, self.reco
def get_maps(self, start, end, scalers=["pot.drop"], vectors=[]):
"""
Fetch mapex, map2 file content
"""
print(" Fetch map records.")
self.reco = pd.DataFrame()
records = self.fetch_records()
for r in records:
stime = dt.datetime(r["start.year"], r["start.month"], r["start.day"], r["start.hour"],
r["start.minute"], int(r["start.second"]))
etime = dt.datetime(r["end.year"], r["end.month"], r["end.day"], r["end.hour"],
r["end.minute"], int(r["end.second"]))
if len(vectors)>0: o = pd.DataFrame(r, columns=vectors)
else: o = pd.DataFrame()
L = 1 if len(o) == 0 else len(o)
o["stime"], o["etime"] = [stime]*L, [etime]*L
for p in scalers:
o[p] = [r[p]]*L
self.reco = pd.concat([self.reco, o])
# <NAME> - <EMAIL>
"""
Predict : Regression Methods(using scikit-learn package)
A prediction task for rental home company
Author: <NAME> - <EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import fbeta_score, make_scorer
# Custom Loss function to evaluate the accuracy
def custom_loss_func_1(y_true, y_pred):
diff = np.abs(y_true - y_pred).mean()
return np.log1p(diff)
# Custom Loss function to evaluate the accuracy
def custom_loss_func_2(y_true, y_pred):
diff = np.median(np.abs(y_true - y_pred), axis=0)
return np.log1p(diff)
# Import dataset
data_train = pd.read_csv('Data Science.csv')
data_test = pd.read_csv('Data Science.csv')
# separate dependent and independent vars
# and building the test and train
X_tr = data_train.loc[:, ~data_train.columns.isin(['SaleDollarCnt','TransDate','ZoneCodeCounty']) ].values
Y_tr = data_train['SaleDollarCnt'].values
#X_ts for the final prediction
X_ts = data_test.loc[:, ~data_test.columns.isin(['SaleDollarCnt','TransDate','ZoneCodeCounty'])].values
Xtr_len = len(X_tr)
Xts_len = len(X_ts)
# missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X_tr[:, :])
X_tr[:, :] = imputer.transform(X_tr[:, :])
imputer = imputer.fit(X_ts[:, :])
X_ts[:, :] = imputer.transform(X_ts[:, :])
# Splitting Training set and Test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X_tr, Y_tr, test_size = 0.2, random_state = 0)
# Fit Linear Regression to x_train
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(x_train, y_train)
# Predicting
y_pred = lin_reg.predict(x_test)
print('First Way: Linear Regression: ')
score = make_scorer(custom_loss_func_1, greater_is_better=False)
print('Loss function 1 output: ', custom_loss_func_1(y_pred, y_test))
print('score: ', score(lin_reg, x_test, y_test))
score = make_scorer(custom_loss_func_2, greater_is_better=False)
print('Loss function 2 output: ', custom_loss_func_2(y_pred, y_test))
print('score: ', score(lin_reg, x_test, y_test))
print('--------------------------------------------------------')
# Fit Decision Tree Regression to x_train
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(x_train, y_train)
# Predicting a new result
y_pred = regressor.predict(x_test)
print('Second Way: Decision Tree Regression: ')
score = make_scorer(custom_loss_func_1, greater_is_better=False)
print('Loss function 1 output: ', custom_loss_func_1(y_pred, y_test))
print('score: ', score(regressor, x_test, y_test))
score = make_scorer(custom_loss_func_2, greater_is_better=False)
print('Loss function 2 output: ', custom_loss_func_2(y_pred, y_test))
print('score: ', score(regressor, x_test, y_test))
print('--------------------------------------------------------')
# Fit Random Forest Regression to the x train
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor.fit(x_train, y_train)
# Predicting
y_pred = regressor.predict(x_test)
print('Third Way: Random Forest Regression: ')
score = make_scorer(custom_loss_func_1, greater_is_better=False)
print('Loss function 1 output: ', custom_loss_func_1(y_pred, y_test))
print('score: ', score(regressor, x_test, y_test))
score = make_scorer(custom_loss_func_2, greater_is_better=False)
print('Loss function 2 output: ', custom_loss_func_2(y_pred, y_test))
print('score: ', score(regressor, x_test, y_test))
print('--------------------------------------------------------')
##### Final Prediction:
y_pred_final = regressor.predict(X_ts)
#CSV results
from pandas import DataFrame
column1 = 'PropertyID'
column2 = 'SaleDollarCnt'
dic = dict(zip([column1,column2],[data_test['PropertyID'].values,y_pred_final.tolist()]))
print(dic)
df = DataFrame(dic)
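# --- Hedged continuation (the output filename is an assumption) ---
df.to_csv('predictions.csv', index=False)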
import pandas as pd
import numpy as np
import itertools
import warnings
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance
from joblib import Parallel, delayed
__all__ = ['hcluster_tally',
'neighborhood_tally',
'running_neighborhood_tally',
'any_cluster_tally']
"""TODO:
* Incorporate running_neighbors into TCRdist, wrapping the standard metrics so they can work
easily.
* Verify that running_neighbor uses all CPUs and less memory: see how it could be further optimized
with joblib caching, expecially for the metrics that include the CDR2 and CDR1.5 etc.
"""
def _counts_to_cols(counts):
"""Encodes the counts Series as columns that can be added to a takky result row
Example counts table:
trait1 trait2 cmember
0 0 0 233
1 226
1 0 71
1 79
1 0 0 0
1 0
1 0 0
1 9"""
j = 0
cols = tuple(counts.index.names)
levels = []
for name, lev in zip(counts.index.names, counts.index.levels):
if len(lev) == 1:
"""This solves the problem of when a variable with one level is included
by accident or e.g. all instances are cmember = 1 (top node, big R)"""
if name == 'cmember':
levels.append(('MEM+', 'MEM-'))
elif isinstance(lev[0], int):
levels.append(tuple(sorted((0, lev[0]))))
else:
levels.append(tuple(sorted(('REF', lev[0]))))
else:
levels.append(tuple(lev))
levels = tuple(levels)
out = {'ct_columns':cols}
for xis in itertools.product(*(range(len(u)) for u in levels)):
vals = []
for ui, (col, u, xi) in enumerate(zip(counts.index.names, levels, xis)):
vals.append(u[xi])
try:
ct = counts.loc[tuple(vals)]
except (pd.core.indexing.IndexingError, KeyError):
ct = 0
out.update({'val_%d' % j:tuple(vals),
'ct_%d' % j:ct})
j += 1
return out
def _dict_to_nby2(d):
"""Takes the encoded columns of counts from a results row and re-creates the counts table"""
cols = d['ct_columns']
n = np.max([int(k.split('_')[1]) for k in d if 'val_' in k]) + 1
cts = [d['ct_%d' % j] for j in range(n)]
idx = pd.MultiIndex.from_tuples([d['val_%d' % j] for j in range(n)], names=cols)
counts = pd.Series(cts, index=idx)
return counts
def _prep_counts(cdf, xcols, ycol, count_col):
"""Returns a dict with keys that can be added to a result row to store tallies
For a 2x2 table the data is encoded as follows
X+MEM+ encodes the first level in Y (cluster membership = MEM+) and X
and out contains columns named val_j and ct_j where j is ravel order, such that
the values of a 2x2 table (a, b, c, d) are:
ct_0 X-MEM+ a First level of X and a cluster member ("M+" which sorts before "M-" so is also first level)
ct_1 X-MEM- b First level of X and a non member
ct_2 X+MEM+ c Second level of X and a cluster member
ct_3 X+MEM- d Second level of X and a non member
val_j also encodes explictly the values of the X levels and cluster membership indicator (MEM+ = member)
This means that an OR > 1 is enrichment of the SECOND level of X in the cluster.
Longer tables are stored in ravel order with ct_j/val_j pairs with val_j containing the values
of each column/variable.
Key "ct_columns" contains the xcols and ycol as a list
Ket levels contains the levels of xcols and ycol as lists from a pd.Series.MultiIndex"""
counts = cdf.groupby(xcols + [ycol], sort=True)[count_col].agg(np.sum)
out = _counts_to_cols(counts)
counts = _dict_to_nby2(out)
out['levels'] = [list(lev) for lev in counts.index.levels]
if len(xcols) == 1 and counts.shape[0] == 4:
"""For a 2x2 add helpful count and probability columns
Note that the first level of a column/variable is "negative"
because its index in levels is 0"""
n = counts.sum()
levels = counts.index.levels
tmp = {'X+MEM+':counts[(levels[0][1], 'MEM+')],
'X+MEM-':counts[(levels[0][1], 'MEM-')],
'X-MEM+':counts[(levels[0][0], 'MEM+')],
'X-MEM-':counts[(levels[0][0], 'MEM-')]}
with warnings.catch_warnings():
warnings.simplefilter('ignore')
tmp.update({'X_marg':(tmp['X+MEM+'] + tmp['X+MEM-']) / n,
'MEM_marg':(tmp['X+MEM+'] + tmp['X-MEM+']) / n,
'X|MEM+':tmp['X+MEM+'] / (tmp['X+MEM+'] + tmp['X-MEM+']),
'X|MEM-':tmp['X+MEM-'] / (tmp['X+MEM-'] + tmp['X-MEM-']),
'MEM|X+':tmp['X+MEM+'] / (tmp['X+MEM+'] + tmp['X+MEM-']),
'MEM|X-':tmp['X-MEM+'] / (tmp['X-MEM+'] + tmp['X-MEM-'])})
out.update(tmp)
return out
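# Toy illustration of _prep_counts (hypothetical data): six clones, one binary trait
# plus cluster membership; the 2x2 counts land in ct_0..ct_3 (ravel order) and the
# convenience keys X+MEM+/X-MEM+/... are added because the table is 2x2.
#   cdf = pd.DataFrame({'trait1': [0, 0, 1, 1, 1, 0],
#                       'cmember': ['MEM+', 'MEM-', 'MEM+', 'MEM+', 'MEM-', 'MEM+'],
#                       'count': [1] * 6})
#   out = _prep_counts(cdf, ['trait1'], 'cmember', 'count')
#   out['X+MEM+'] == 2 and out['X-MEM+'] == 2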
def neighborhood_tally(df_pop, pwmat, x_cols, df_centroids=None, count_col='count', knn_neighbors=50, knn_radius=None):
"""Forms a cluster around each row of df and tallies the number of instances with/without traits
in x_cols. The contingency table for each cluster/row of df can be used to test for enrichments of the traits
in x_cols with the distances between each row provided in pwmat. The neighborhood is defined by the K closest neighbors
using pairwise distances in pwmat, or defined by a distance radius.
For TCR analysis this can be used to test whether the TCRs in a neighborhood are associated with a certain trait or
phenotype. You can use hier_diff.cluster_association_test with the output of this function to test for
significnt enrichment.
Note on output: val_j/ct_j pairs provide the counts for each element of the n x 2 continency table where the last
dimension is always 'cmember' (MEM+ or MEM-) indicating cluster membership for each row. The X+MEM+ notation
is provided for convenience for 2x2 tables and X+ indicates the second level of x_col when sorted (e.g. 1 for [0, 1]).
Params
------
df_pop : pd.DataFrame [nclones x metadata]
Contains metadata for each clone in the population to be tallied.
pwmat : np.ndarray [df_centroids.shape[0] x df_pop.shape[0]]
Pairwise distance matrix for defining neighborhoods.
Number of rows in pwmat must match the number of rows in df_centroids,
which may be the number of rows in df_pop if df_centroids=None
x_cols : list
List of columns to be tested for association with the neighborhood
df_centroids : pd.DataFrame [nclones x 1]
An optional DataFrame containing clones that will act as centroids in the
neighborhood clustering. These can be a subset of df_pop or not, however
the number of rows in df_centroids must match the number of rows in pwmat.
If df_centroids=None then df_centroids = df_pop and all clones in df_pop
are used.
count_col : str
Column in df that specifies counts.
Default none assumes count of 1 cell for each row.
knn_neighbors : int
Number of neighbors to include in the neighborhood, or fraction of all data if K < 1
knn_radius : float
Radius for inclusion of neighbors within the neighborhood.
Specify K or R but not both.
Returns
-------
res_df : pd.DataFrame [nclones x results]
Counts of clones within each neighborhood, grouped by x_cols.
The "neighbors" column provides the pd.DataFrame indices of the elements in
df_pop that are within the neighborhood of each centroid (not the integer/vector
based indices)"""
if knn_neighbors is None and knn_radius is None:
raise(ValueError('Must specify K or radius'))
if not knn_neighbors is None and not knn_radius is None:
raise(ValueError('Must specify K or radius (not both)'))
if df_centroids is None:
df_centroids = df_pop
if pwmat.shape[0] != df_pop.shape[0]:
raise ValueError(f'Number of rows in pwmat {pwmat.shape[0]} does not match df_pop {df_pop.shape[0]}')
if pwmat.shape[1] != df_pop.shape[0]:
raise ValueError(f'Number of columns in pwmat {pwmat.shape[1]} does not match df_pop {df_pop.shape[0]}')
else:
if pwmat.shape[0] != df_centroids.shape[0]:
raise ValueError(f'Number of rows in pwmat {pwmat.shape[0]} does not match df_centroids {df_centroids.shape[0]}')
if pwmat.shape[1] != df_pop.shape[0]:
raise ValueError(f'Number of columns in pwmat {pwmat.shape[1]} does not match df_pop {df_pop.shape[0]}')
if count_col is None:
        df_pop = df_pop.assign(count=1)
count_col = 'count'
ycol = 'cmember'
res = []
for ii in range(df_centroids.shape[0]):
if not knn_neighbors is None:
if knn_neighbors < 1:
frac = knn_neighbors
K = int(knn_neighbors * df_pop.shape[0])
# print('Using K = %d (%1.0f%% of %d)' % (K, 100*frac, n))
else:
K = int(knn_neighbors)
R = np.partition(pwmat[ii, :], K)[K]
else:
R = knn_radius
y_lu = {True:'MEM+', False:'MEM-'}
y_float = (pwmat[ii, :] <= R).astype(float)
y = np.array([y_lu[yy] for yy in y_float])
K = int(np.sum(y_float))
cdf = df_pop.assign(**{ycol:y})[[ycol, count_col] + x_cols]
out = _prep_counts(cdf, x_cols, ycol, count_col)
out.update({'index':ii,
'neighbors':list(df_pop.index[np.nonzero(y_float)[0]]),
'K_neighbors':K,
'R_radius':R})
res.append(out)
res_df = pd.DataFrame(res)
return res_df
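# Example (illustrative; assumes a clone table `df` with a categorical column 'epitope'
# and a pairwise distance matrix `pwmat` whose rows are centroids and columns are clones):
#   res = neighborhood_tally(df, pwmat, x_cols=['epitope'], knn_neighbors=50)
# Each row of `res` then holds the n x 2 membership tally for one neighborhood and can be
# passed to cluster_association_test (referenced in the docstring) for enrichment testing.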
def any_cluster_tally(df, cluster_df, x_cols, cluster_ind_col='neighbors', count_col='count'):
"""Tallies clones inside (outside) each cluster for testing enrichment of other categorical
variables defined by x_cols in df. Clusters are defined in cluster_df using the cluster_ind_col
(default: 'neighbors') which should contain *positional* indices into df for cluster members.
This function only organizes the counts for testing such that each row of the output represents
a cluster that could be tested for enrichment.
As an example, one could use Fisher's exact test to detect enrichment/association of the
neighborhood/cluster with one variable.
Tests the 2 x 2 table for each clone:
+----+----+-------+--------+
| | Cluster |
| +-------+--------+
| | Y | N |
+----+----+-------+--------+
|VAR | 1 | a | b |
| +----+-------+--------+
| | 0 | c | d |
+----+----+-------+--------+
This and other tests are available with the cluster_association_test function that takes the output
of this function as input.
Params
------
df : pd.DataFrame [nclones x metadata]
Contains metadata for each clone.
cluster_df : pd.DataFrame, one row per cluster
Contains the column in cluster_ind_col (default: "neighbors") that should
contain positional indices into df indicating cluster membership
x_cols : list
List of columns to be tested for association with the neighborhood
count_col : str
Column in df that specifies counts.
Default none assumes count of 1 cell for each row.
cluster_ind_col : str, column in cluster_df
Values should be lists or tuples of positional indices into df
Returns
-------
res_df : pd.DataFrame [nclusters x results]
A 2xN table for each cluster."""
ycol = 'cmember'
if count_col is None:
df = df.assign(count=1)
count_col = 'count'
n = df.shape[0]
res = []
    for cid, m in cluster_df[cluster_ind_col].items():
not_m = [i for i in range(n) if not i in m]
        y_float = np.zeros(n, dtype=int)
y_float[m] = 1
y_lu = {1:'MEM+', 0:'MEM-'}
y = np.array([y_lu[yy] for yy in y_float])
K = int(np.sum(y_float))
cdf = df.assign(**{ycol:y})[[ycol, count_col] + x_cols]
out = _prep_counts(cdf, x_cols, ycol, count_col)
out.update({'cid':cid,
'neighbors':list(df.index[m]),
'neighbors_i':m,
'K_neighbors':K})
res.append(out)
    res_df = pd.DataFrame(res)
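    # The docstring promises one tallied row per cluster; returning the assembled
    # DataFrame is the implied final step of this function.
    return res_df

# Example (illustrative; assumes `df` has a categorical column 'epitope' and `clusters`
# has a 'neighbors' column of positional indices into df):
#   tally = any_cluster_tally(df, clusters, x_cols=['epitope'])
#   # each row of `tally` can then be tested, e.g. with Fisher's exact test.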
import re
import pandas as pd
# Function that searches data.txt for email/phone numbers before returning a dictionary
def find_data(pattern, column_name):
with open('data.txt', 'r') as file:
contents = file.read()
matches = pattern.findall(contents)
matches_dict = {column_name: matches}
return matches_dict
# Function that converts the above dictionary to an Excel file
def save_excel(matches, filename):
    df = pd.DataFrame(data=matches)
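    # Writing the frame out is the implied final step; the exact output format is an assumption.
    df.to_excel(filename, index=False)

# Example driver (illustrative): the regex patterns below are simple approximations and
# 'data.txt' is assumed to sit next to this script.
if __name__ == '__main__':
    email_pattern = re.compile(r'[\w.+-]+@[\w-]+\.[\w.-]+')
    phone_pattern = re.compile(r'\d{3}[-.\s]?\d{3}[-.\s]?\d{4}')
    save_excel(find_data(email_pattern, 'Emails'), 'emails.xlsx')
    save_excel(find_data(phone_pattern, 'Phone Numbers'), 'phones.xlsx')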
import numpy as np
import pandas as pd
def getDailyVol(close, span0=100):
'''
Computes the daily volatility of price returns.
It takes a closing price series, applies a diff sample to sample
(assumes each sample is the closing price), computes an EWM with
`span0` samples and then the standard deviation of it.
See Advances in Financial Analytics, snippet 3.1
@param[in] close A series of prices where each value is the closing price of an asset.
The index of the series must be a valid datetime type.
@param[in] span0 The sample size of the EWM.
@return A pandas series of daily return volatility.
'''
df0 = close.index.searchsorted(close.index-pd.Timedelta(days=1))
df0 = df0[df0 > 0]
df0 = pd.Series(close.index[df0-1], index=close.index[close.shape[0]-df0.shape[0]:])
df0 = close.loc[df0.index] / close.loc[df0.values].values-1 # Daily returns
df0 = df0.ewm(span=span0).std()
return df0
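# Example (illustrative): given a pd.Series `close` of prices indexed by a DatetimeIndex,
#   vol = getDailyVol(close, span0=100)
# returns an EWM estimate of daily-return volatility aligned to (a subset of) close.index.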
def getVerticalBarrier(tEvents, close, numDays=0):
"""
Adding a Vertical Barrier
For each index in t_events, it finds the timestamp of the next price bar at or immediately after
a number of days num_days. This vertical barrier can be passed as an optional argument t1 in get_events.
This function creates a series that has all the timestamps of when the vertical barrier would be reached.
Advances in Financial Machine Learning, Snippet 3.4 page 49.
@param tEvents A pd.DateTimeIndex of events.
@param close A pd.Series of close prices.
@param numDays The number of days to add for vertical barrier.
@return A pd.Series of Timestamps of vertical barriers
"""
verticalBarrier = close.index.searchsorted(tEvents + pd.Timedelta(days=numDays))
verticalBarrier = verticalBarrier[verticalBarrier < close.shape[0]]
return pd.Series(close.index[verticalBarrier], index = tEvents[:verticalBarrier.shape[0]]) # NaNs at the end
def applyPtSlOnT1(close, events, ptSl, molecule):
'''
Apply stop loss/profit taking, if it takes place before t1 (vertical barrier)
(end of event).
Advances in Financial Machine Learning, snippet 3.2 page 45.
    @param close A pd.Series of prices indexed by datetime.
    @param events A pd.DataFrame with columns 't1' (vertical barrier timestamps) and
        'trgt' (unit width of the horizontal barriers).
    @param ptSl A list of two non-negative floats: multiples of 'trgt' that set the widths
        of the profit-taking and stop-loss barriers (0 disables that barrier).
    @param molecule A list with the subset of event indices processed by a single thread.
    @return A pd.DataFrame with the timestamps (if any) at which each barrier was first touched.
'''
events_ = events.loc[molecule]
out = events_[['t1']].copy(deep=True)
if ptSl[0] > 0:
pt = ptSl[0] * events_['trgt']
else:
        pt = pd.Series(index=events.index)  # NaNs
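    if ptSl[1] > 0:
        sl = -ptSl[1] * events_['trgt']
    else:
        sl = pd.Series(index=events.index)  # NaNs
    # The remainder follows AFML snippet 3.2: walk the price path between each event start
    # and its vertical barrier t1, recording the first touch of each horizontal barrier.
    # It assumes events_ also carries a 'side' column, as in the book.
    for loc, t1 in events_['t1'].fillna(close.index[-1]).items():
        df0 = close[loc:t1]                                     # path prices
        df0 = (df0 / close[loc] - 1) * events_.at[loc, 'side']  # path returns
        out.loc[loc, 'sl'] = df0[df0 < sl[loc]].index.min()     # earliest stop loss
        out.loc[loc, 'pt'] = df0[df0 > pt[loc]].index.min()     # earliest profit taking
    return out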
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize, size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with | ensure_clean_store(setup_path, mode="w") | pandas.tests.io.pytables.common.ensure_clean_store |
import numpy as np
import pandas as pd
#import scipy.stats
import random
import math
from time import time
names = locals()
#from ast import literal_eval
# Data import
df_area = pd.read_csv('/public/home/hpc204212088/connected_vehicle/xin3/shortest_path/area.csv')
list_county = list(df_area['c_id'])
df_density = pd.read_csv('/public/home/hpc204212088/connected_vehicle/xin3/picture/vehicle_density.csv')
df_ppl_code=pd.read_csv(r'/public/home/hpc204212088/connected_vehicle/xin3/people_code/people_code.csv')
df_ppl_wai = df_ppl_code[df_ppl_code['c_id1']!=df_ppl_code['c_id2']].reset_index(drop=True)
list_id = list()
list_c = list()
for i in range(len(df_ppl_wai)):
for j in range(df_ppl_wai['id_start'][i],df_ppl_wai['id_end'][i]+1):
list_id.append(j)
list_c.append(df_ppl_wai['c_id1'][i])
print(i)
df_hibernat = pd.DataFrame()
df_hibernat['id'] = list_id
df_hibernat['c_id'] = list_c
df_ppl_nei = df_ppl_code[df_ppl_code['c_id1']==df_ppl_code['c_id2']].reset_index(drop=True)
list_id = list()
list_c = list()
for i in range(len(df_ppl_nei)):
for j in range(df_ppl_nei['id_start'][i],df_ppl_nei['id_end'][i]+1):
list_id.append(j)
list_c.append(df_ppl_nei['c_id1'][i])
print(i)
df_hibernat_nei = | pd.DataFrame() | pandas.DataFrame |
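The script above collects per-person ids and county codes into two parallel lists and then assembles them into a DataFrame; its truncated last line targets `pandas.DataFrame`. A minimal sketch of that list-to-frame pattern follows — the toy values are assumptions, and the column names simply mirror the earlier `df_hibernat` block.

import pandas as pd

list_id_demo = [0, 1, 2]          # person ids collected in the loop
list_c_demo = [110101, 110101, 120102]  # matching county codes
df_demo = pd.DataFrame()
df_demo['id'] = list_id_demo
df_demo['c_id'] = list_c_demo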
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 11:40:16 2017
@author: tobias
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Read the input data
input_file = '/Users/tobias/GitHub/seqcap_processor/data/processed/target_contigs/match_table.txt'
workdir = '/'.join(input_file.split('/')[:-1])
matrix = pd.read_csv(input_file,sep='\t',index_col=0)
data = np.matrix(matrix).T
y_labels = matrix.columns
x_labels = np.array(matrix.index)
num_x_labels = range(len(x_labels))
# Split dataset into thirds for better readability
third_data = np.split(data, 3,axis=1)
third_x_labels = np.split(np.matrix(x_labels), 3,axis=1)
third_num_x_labels = np.split(np.matrix(num_x_labels),3,axis=1)
# Plot the matrices
for i in range(len(third_data)):
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(third_data[i]), cmap='GnBu')
height,width = third_data[i].shape
#cb = fig.colorbar(res)
plt.xlabel('exon index',fontsize=7)
plt.ylabel('sample index',fontsize=7)
xlabels = list(np.array(third_num_x_labels[i])[0])
plt.xticks(np.arange(width)[::30],xlabels[::30],fontsize=8)
plt.yticks(fontsize=8)
#ax.tick_params(left='off',labelleft='off')
fig.savefig(os.path.join(workdir,'contig_exon_matrix_%i.png'%i), dpi = 500)
# Write overview of exon indices
key_to_exon_index = | pd.DataFrame({'index':num_x_labels,'locus_name': x_labels}) | pandas.DataFrame |
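The truncated line above builds a lookup table from two parallel sequences (numeric exon index and locus name) with `pandas.DataFrame`. Here is a self-contained sketch of that construction with toy data; the trailing `to_csv` step is only an assumption about what the original script does next.

import os
import pandas as pd

num_x_labels_demo = range(3)
x_labels_demo = ['exon_a', 'exon_b', 'exon_c']
key_to_exon_index_demo = pd.DataFrame({'index': list(num_x_labels_demo),
                                       'locus_name': x_labels_demo})
# key_to_exon_index_demo.to_csv(os.path.join('.', 'key_to_exon_index.csv'), index=False)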
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides core functions to load and split a dataset.
"""
# Imports
from collections import namedtuple, OrderedDict
import torch
from copy import deepcopy
import logging
import bisect
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler, RandomSampler, SequentialSampler
import numpy as np
from abc import abstractmethod, ABC
from tqdm import tqdm
import pandas as pd
from sklearn.model_selection import (
KFold, StratifiedKFold, ShuffleSplit, StratifiedShuffleSplit)
from sklearn.preprocessing import KBinsDiscretizer
# Global parameters
SetItem = namedtuple("SetItem", ["test", "train", "validation"], defaults=(None,) * 3)
DataItem = namedtuple("DataItem", ["inputs", "outputs", "labels"])
class ListTensors:
def __init__(self, *tensor_list):
self.list_tensors = list(tensor_list)
def __getitem__(self, item):
return self.list_tensors[item]
def to(self, device, **kwargs):
for i, e in enumerate(self.list_tensors):
self.list_tensors[i] = e.to(device, **kwargs)
return self.list_tensors
class AbstractDataManager(ABC):
@abstractmethod
def get_dataloader(self, train=False, validation=False, test=False,
fold_index=0, **kwargs):
pass
@abstractmethod
def get_nb_folds(self):
return
class DataManager(AbstractDataManager):
""" Data manager used to split a dataset in train, test and validation
pytorch datasets.
"""
def __init__(self, input_path, metadata_path, output_path=None, add_to_input=None, labels=None, stratify_label=None,
categorical_strat_label=True, custom_stratification=None, N_train_max=None, projection_labels=None,
number_of_folds=10, batch_size=1, sampler=None, in_features_transforms=None, input_transforms=None,
output_transforms=None, labels_transforms=None, stratify_label_transforms=None, data_augmentation=None,
self_supervision=None, add_input=False, patch_size=None, input_size=None, test_size=0.1, dataset=None,
device='cpu', sep=',', unique_df_keys=None, keys_to_keep=None, no_missing_mod=False,
mmap_mode=True, dataset_kwargs=None, **dataloader_kwargs):
""" Splits an input numpy array using memory-mapping into three sets:
test, train and validation. This function can stratify the data.
TODO: add how validation split is performed.
TODO: fix case number_of_folds=1
Parameters
----------
input_path: str or list[str] or dict(<mod>: list[str]) formatted as {modality: [i1, i2, ...]}
<str> are the paths to the numpy arrays containing the input tensors that will be splited/loaded.
metadata_path: str or list[str] or dict(<mod>: list[str]) formatted as {modality: [p1, p2,...]}
<str> are the paths to the metadata table in tsv format.
output_path: str or list[str] or dict(<mod>: list[str]) formatted as {modality: [i1, i2, ...]}, default None
<str> are the paths to the numpy arrays containing the output tensors
that will be splitted/loaded.
add_to_input: list of str, default None
list of features to add to the input
labels: list of str, default None
in case of classification/regression, the name of the column(s)
in the metadata table to be predicted.
stratify_label: str, default None
the name of the column in the metadata table containing the label
used during the stratification.
categorical_strat_label: bool, default True
is the stratification label a categorical or continuous variable ?
custom_stratification: dict, default None
same format as projection labels. It will split the dataset into train/test/val according
to the stratification defined in the dict.
N_train_max: int, default None
set the max number of training samples that can be put in the training set. The stratification is made
accordingly
projection_labels: dict, default None
selects only the data that match the conditions in the dict
{<column_name>: <value>}.
number_of_folds: int, default 10
the number of folds that will be used in the cross validation.
batch_size: int, default 1
the size of each mini-batch.
sampler: str in ["random", "weighted_random", "sequential"], default None
Whether we use a weighted random sampler (to deal with imbalanced classes issue), random sampler (without
replacement, to introduce shuffling in batches) or sequential (no shuffle)
input_transforms, output_transforms: list of callable, default None
transforms a list of samples with pre-defined transformations.
data_augmentation: list of callable, default None
transforms the training dataset input with pre-defined transformations on the fly during the training.
self_supervision: a callable, default None
applies a transformation to each input and generates a label
add_input: bool, default False
if true concatenate the input tensor to the output tensor.
test_size: float, default 0.1
should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split.
dataset: Dataset object, default None
The Dataset used to create the DataLoader. It must be a subclass of <ArrayDataset>
unique_df_keys: [str], default None
Unique keys to use to merge several df from multiple modalities
keys_to_keep: [str] or dict(mod: [str]), default None
If set, defines the keys to keep in the final df, can be per modality or globally
no_missing_mod: bool, default False
If set, mask out data with missing modalities (both in train and test).
"""
self.logger = logging.getLogger("pynet")
(input_path, metadata_path, output_path) = DataManager._reformat(input_path, metadata_path, output_path)
self.modalities = None
self.unique_df_keys = unique_df_keys
self.keys_to_keep = keys_to_keep
self.mmap_mode = mmap_mode
# Loads self.inputs, self.outputs, self.df, self.modalities
multimodal = self.load_data(inputs=input_path, dfs=metadata_path, outputs=output_path, sep=sep)
check_nan = deepcopy(labels) or []
if no_missing_mod:
check_nan.extend(["index_%s"%mod for mod in (self.modalities or [])])
mask = DataManager.get_mask(
df=self.df,
projection_labels=projection_labels,
check_nan=check_nan,
)
mask_indices = DataManager.get_indices_from_mask(mask)
# We should only work with masked data but we want to preserve the memory mapping so we are getting the right
# index at the end (in __getitem__ of ArrayDataset)
self.labels, self.stratify_label, self.features_to_add = (None, None, None)
if labels is not None:
if self_supervision is not None:
self.logger.warning("label and self_supervision are on.")
assert np.all(~self.df[labels][mask].isna())
self.labels = self.df[labels].values.copy()
self.labels = self.labels.squeeze()
if stratify_label is not None:
self.stratify_label = self.df[stratify_label].values.copy()
# Apply the labels transform here as a mapping to the integer representation of the classes
for i in mask_indices:
label = self.stratify_label[i]
for tf in (stratify_label_transforms or []):
label = tf(label)
self.stratify_label[i] = label
init_stratify_label_copy = self.stratify_label.copy()
# If necessary, discretizes the labels
if not categorical_strat_label:
self.stratify_label[mask] = DataManager.discretize_continous_label(self.stratify_label[mask],
verbose=True)
if add_to_input is not None:
self.features_to_add = np.array([self.df[f].values for f in add_to_input]).transpose()
self.metadata_path = metadata_path
self.projection_labels = projection_labels
self.number_of_folds = number_of_folds
self.batch_size = batch_size
self.input_transforms = input_transforms or []
self.output_transforms = output_transforms or []
self.labels_transforms = labels_transforms or []
self.data_augmentation = data_augmentation or []
self.self_supervision = self_supervision
self.add_input = add_input
self.data_loader_kwargs = dataloader_kwargs
assert sampler in [None, "weighted_random", "random", "sequential"], "Unknown sampler: %s" % str(sampler)
self.sampler = sampler
dataset_kwargs = dataset_kwargs or dict()
if dataset is not None:
dataset_cls = dataset
elif multimodal:
dataset_cls = MultiModalDataset
else:
dataset_cls = ArrayDataset
assert issubclass(dataset_cls, ArrayDataset), \
"Dataset %s must be a subclass of <ArrayDataset>"%dataset_cls.__name__
if self.sampler == "weighted_random":
if self.stratify_label is None:
raise ValueError('Impossible to use the WeightedRandomSampler if no stratify label is available.')
class_samples_count = [0 for _ in range(len(set(self.stratify_label[mask])))] # len == nb of classes
for label in self.stratify_label[mask]:
class_samples_count[label] += 1
# Imbalanced weights in case of imbalanced classes issue
self.sampler_weigths = 1. / torch.tensor(class_samples_count, dtype=torch.float)
self.dataset = dict((key, []) for key in ("train", "test", "validation"))
if N_train_max is not None:
assert custom_stratification is not None and \
{"train", "test"} <= set(custom_stratification.keys())
## 1st step: split into train/test (get only indices)
dummy_like_X_masked = np.ones(np.sum(mask))
_val_indices, _train_indices, _test_indices = (None, None, None)
if custom_stratification is not None:
if "validation" in custom_stratification and stratify_label is not None and N_train_max is None:
print("Warning: impossible to stratify the data: validation+test set already defined ! ")
train_mask, test_mask = (DataManager.get_mask(self.df, custom_stratification["train"]),
DataManager.get_mask(self.df, custom_stratification["test"]))
if "validation" in custom_stratification:
val_mask = DataManager.get_mask(self.df, custom_stratification["validation"])
val_mask &= mask
_val_indices = DataManager.get_indices_from_mask(val_mask)
train_mask &= mask
test_mask &= mask
_train_indices = DataManager.get_indices_from_mask(train_mask)
_test_indices = DataManager.get_indices_from_mask(test_mask)
elif stratify_label is not None:
splitter = StratifiedShuffleSplit(
n_splits=1, random_state=0, test_size=test_size)
_train_indices, _test_indices = next(
splitter.split(dummy_like_X_masked, self.stratify_label[mask]))
_train_indices = mask_indices[_train_indices]
_test_indices = mask_indices[_test_indices]
else:
if test_size == 1:
_train_indices, _test_indices = (None, mask_indices)
else:
splitter = ShuffleSplit(
n_splits=1, random_state=0, test_size=test_size)
_train_indices, _test_indices = next(splitter.split(dummy_like_X_masked))
_train_indices = mask_indices[_train_indices]
_test_indices = mask_indices[_test_indices]
if _train_indices is None:
return
assert len(set(_train_indices) & set(_test_indices)) == 0, 'Test set must be independent from train set'
self.dataset["test"] = dataset_cls(
self.inputs, _test_indices, labels=self.labels,
features_to_add=self.features_to_add,
outputs=self.outputs, add_input=self.add_input,
in_features_transforms=in_features_transforms,
input_transforms = self.input_transforms,
output_transforms = self.output_transforms,
label_transforms = self.labels_transforms,
self_supervision=self.self_supervision,
patch_size=patch_size, input_size=input_size,
concat_datasets=(self.inputs is not None),
df=self.df,
modalities=self.modalities,
device=device, **dataset_kwargs)
# 2nd step: split the training set into K folds (K-1 for training, 1
# for validation, K times)
if stratify_label is not None and not categorical_strat_label:
# Recomputes the discretization for the training set to get a split train/val with finer statistics
# (we do not assume that train+test has the same stats as train in case of custom stratification).
self.stratify_label[_train_indices] = \
DataManager.discretize_continous_label(init_stratify_label_copy[_train_indices], verbose=True)
dummy_like_X_train = np.ones(len(_train_indices))
if N_train_max is not None:
Splitter = ShuffleSplit if stratify_label is None else StratifiedShuffleSplit
kfold_splitter = Splitter(n_splits=self.number_of_folds,
train_size=float(N_train_max/len(_train_indices)), random_state=0)
strat_indices = np.array(self.stratify_label[_train_indices], dtype=np.int32) \
if stratify_label is not None else None
gen = kfold_splitter.split(dummy_like_X_train, strat_indices)
if _val_indices is not None:
gen = [(_train_indices[tr], _val_indices) for (tr, _) in gen]
else:
gen = [(_train_indices[tr], _train_indices[val]) for (tr, val) in gen]
elif _val_indices is not None:
gen = [(_train_indices, _val_indices) for _ in range(self.number_of_folds)]
if self.number_of_folds > 1:
print("Warning: same train/val/test split for all folds !", flush=True)
else:
if self.number_of_folds > 1:
Splitter = KFold if stratify_label is None else StratifiedKFold
kfold_splitter = Splitter(n_splits=self.number_of_folds)
strat_indices = np.array(self.stratify_label[_train_indices], dtype=np.int32) \
if stratify_label is not None else None
gen = kfold_splitter.split(dummy_like_X_train, strat_indices)
gen = [(_train_indices[tr], _train_indices[val]) for (tr, val) in gen]
else:
gen = [(_train_indices, [])]
for i, (fold_train_index, fold_val_index) in enumerate(gen):
assert len(set(fold_val_index) & set(fold_train_index)) == 0, \
'Validation set must be independent from training set'
train_dataset = dataset_cls(
self.inputs, fold_train_index,
labels=self.labels, outputs=self.outputs,
features_to_add=self.features_to_add,
add_input=self.add_input,
in_features_transforms=in_features_transforms,
input_transforms=self.input_transforms + self.data_augmentation,
output_transforms=self.output_transforms + self.data_augmentation,
label_transforms=self.labels_transforms,
self_supervision=self.self_supervision,
patch_size=patch_size, input_size=input_size,
concat_datasets=(self.inputs is not None),
df=self.df,
modalities=self.modalities,
device=device, **dataset_kwargs)
val_dataset = dataset_cls(
self.inputs, fold_val_index,
labels=self.labels, outputs=self.outputs,
features_to_add=self.features_to_add,
add_input=self.add_input,
in_features_transforms=in_features_transforms,
input_transforms=self.input_transforms,
output_transforms=self.output_transforms,
label_transforms=self.labels_transforms,
self_supervision=self.self_supervision,
patch_size=patch_size, input_size=input_size,
concat_datasets=(self.inputs is not None),
df=self.df,
modalities=self.modalities,
device=device, **dataset_kwargs)
self.dataset["train"].append(train_dataset)
self.dataset["validation"].append(val_dataset)
self.logger.info("Fold %i - Loaded."%i)
@staticmethod
def discretize_continous_label(labels, bins='sturges', verbose=False):
# Get an estimation of the best bin edges. 'Sturges' is conservative for pretty large datasets (N>1000).
bin_edges = np.histogram_bin_edges(labels, bins=bins)
if verbose:
print('Global histogram:\n', np.histogram(labels, bins=bin_edges, density=False), flush=True)
# Discretizes the values according to these bins
discretization = np.digitize(labels, bin_edges[1:], right=True)
if verbose:
print('Bin Counts after discretization:\n', np.bincount(discretization), flush=True)
return discretization
@staticmethod
def get_indices_from_mask(mask):
return np.arange(len(mask))[mask]
@staticmethod
def _reformat(input_path, metadata_path, output_path=None):
"""
:param input_path: None, str or list[str] or dict(mod: list[str])
:param metadata_path: str or list[str] or dict(mod: list[str])
Warning: there must be a correspondance 1:1 between list[str] in <metadata_path> and <input_path>
and all keys in both dicts must match
:param output_path: str or list[str] or dict(mod: list[str]), default None
:return: (dict(mod: list[str]), dict(mod: list[str]), dict(mod: list[str])) or (list[str], list[str], list[str])
if multimodal = False
"""
if isinstance(metadata_path, str):
if input_path is not None:
assert isinstance(input_path, str), "input path and metadata path must both be strings"
input_path = [input_path]
metadata_path = [metadata_path]
if output_path is not None:
assert isinstance(output_path, str), "output path must be a string"
output_path = [output_path]
elif isinstance(metadata_path, list):
assert all([isinstance(p, str) for p in metadata_path]), "all metadata paths must be string"
if input_path is not None:
assert isinstance(input_path, list) and all([isinstance(p, str) for p in input_path]), \
"all input paths must be string"
assert len(input_path) == len(metadata_path), "input and metadata must have same length"
if output_path is not None:
assert isinstance(output_path, list) and all([isinstance(p, str) for p in output_path]), \
"all output paths must be string"
assert len(output_path) == len(input_path)
elif isinstance(metadata_path, dict):
assert isinstance(input_path, dict), "Input path must be a dict"
for key in metadata_path:
if input_path is not None and output_path is not None:
assert (key in input_path) and (key in output_path), "Missing key in input or output: %s"%key
(input_path[key], metadata_path[key], output_path[key]) = \
DataManager._reformat(input_path[key], metadata_path[key], output_path[key])
elif input_path is not None:
assert key in input_path, "Missing key in input: %s"%key
(input_path[key], metadata_path[key], _) = \
DataManager._reformat(input_path[key], metadata_path[key])
else:
(_, metadata_path[key], _) = DataManager._reformat(None, metadata_path[key])
return (input_path, metadata_path, output_path)
def load_data(self, inputs, dfs, outputs=None, sep=","):
"""
This method sets self.df, self.inputs, self.outputs for unimodal/multimodal datasets.
For unimodal dataset, it just concatenates all the pandas DataFrames and loads the np array with memory mapping.
For multimodal dataset, it merges all the concatenated DataFrame (one per modality) according to a UNIQUE_DF_KEY.
The order of each DataFrame can be retrieved with the set of added columns {index_<mod1>, index_<mod2>...}
where <mod> are the modalities (keys of dfs).
:param inputs: None or list[str] or dict(mod: list[str])
:param dfs: list[str] or dict(mod: list[str])
:param outputs: list[str] or dict(mod: list[str]), default None
:param sep: separator to read pandas DataFrame
:return: bool indicating if it is a multimodal dataset
"""
# Is it a multimodal dataset ?
multimodal = False
if isinstance(dfs, list):
if inputs is not None:
for (i, df) in zip(inputs, dfs):
self.logger.info('Correspondance {data} <==> {meta}'.format(data=i, meta=df))
self.inputs = [np.load(p, mmap_mode=('r'if self.mmap_mode else None)) for p in inputs] if inputs is not None else None
all_df = [pd.read_csv(df, sep=sep) for df in dfs]
assert (self.inputs is None) or all([len(i) == len(df) for (i, df) in zip(self.inputs, all_df)]), \
"All dataframes must match data length"
self.df = pd.concat(all_df, ignore_index=True, sort=False)
if outputs is not None:
self.outputs = [np.load(p, mmap_mode='r') for p in outputs]
else:
self.outputs = None
elif isinstance(dfs, dict):
modalities = self.modalities
if len(dfs) == 1: # Unimodal dataset
key = next(iter(dfs))
return self.load_data(inputs[key] if inputs is not None else inputs,
dfs[key], outputs if outputs is None else outputs[key], sep)
else: # Multimodal dataset
multimodal = True
# 1st: concatenate all df per modality as before and check the uniqueness of UNIQUE_DF_KEY
for key in list(dfs.keys()):
self.logger.info("Modality %s"%key)
self.load_data(inputs[key] if inputs is not None else inputs,
dfs[key], outputs if outputs is None else outputs[key], sep)
inputs[key], dfs[key] = self.inputs, self.df
if outputs is not None:
outputs[key] = self.outputs
assert dfs[key].set_index(self.unique_df_keys).index.is_unique, \
"Duplicated keys {} found in modality {}".format(self.unique_df_keys, key)
self.inputs, self.outputs = (inputs, outputs)
# 2nd: merge all concatenated df from different modalities (according to UNIQUE_DF_KEY) and keep the
# indexes for each modality. We assume all df share the same column names.
self.modalities = list(dfs.keys())
# Insert new "index_<mod>" column (or overwrite the previous one) ranging from 0 to len(df)-1
self.df = dfs[self.modalities[0]].copy()
self.df["index_%s"%self.modalities[0]] = pd.RangeIndex(len(self.df))
# keep only the relevant columns to avoid useless computations
keys_to_keep = self.keys_to_keep
if isinstance(keys_to_keep, list):
keys_to_keep = {mod: keys_to_keep for mod in self.modalities}
elif isinstance(keys_to_keep, dict):
assert set(self.modalities) <= set(keys_to_keep.keys()), "Missing modalities"
else:
raise ValueError("Unknown type: %s"%type(keys_to_keep))
self.df = self.df[keys_to_keep[self.modalities[0]]+["index_%s"%self.modalities[0]]]
for mod in self.modalities[1:]:
assert "index_%s"%mod not in dfs[mod], "index_%s already in df."%mod
indexed_df = dfs[mod].copy()
indexed_df["index_%s"%mod] = pd.RangeIndex(len(indexed_df))
self.df = pd.merge(self.df, indexed_df[keys_to_keep[mod]+["index_%s"%mod]],
on=self.unique_df_keys, how='outer', suffixes=(None, "_%s"%mod))
# # Remove duplicated columns during the merge
# def agg(x):
# if len(x) > 2 or len(x) < 1: raise ValueError()
# if len(x) == 1: return x[0]
# if pd.isna(x[0]): return x[1]
# return x[0]
# self.df = self.df.groupby(lambda x: x.split(unique_suffix)[0], axis=1).aggregate(agg)
# Basic Stats
mask_shared = True
for mod in self.modalities:
mask_shared &= ~self.df["index_%s"%mod].isna()
sharing_details = ""
# for mod in self.modalities:
# sharing_details += "\n Modality %s\n"%mod.upper()
# sharing_details += ", ".join(["%i %s"%((mask_shared & ~self.df["index_%s"%mod].isna() &
# self.df["study"].eq(s)).sum(), s)
# for s in set(self.df["study"])])
print("# Shared keys found: %i" % (mask_shared.sum()))#, sharing_details))
for mod in self.modalities:
mask_unique = ~self.df["index_%s"%mod].isna() & ~mask_shared
print("# Unique keys found in modality %s: %i"%(mod, mask_unique.sum()))
else:
raise ValueError("Unknown df type: %s"%type(inputs))
return multimodal
def get_nb_folds(self):
return self.number_of_folds
def __getitem__(self, item):
""" Return the requested item.
Returns
-------
item: Dataset or list of Dataset
the requested set of data: test, train or validation.
"""
if item not in ("train", "test", "validation"):
raise ValueError("Unknown set! Must be 'train', 'test' or "
"'validation'.")
return self.dataset[item]
def collate_fn(self, list_samples):
""" After fetching a list of samples using the indices from sampler,
the function passed as the collate_fn argument is used to collate lists
of samples into batches.
A custom collate_fn is used here to apply the transformations.
See https://pytorch.org/docs/stable/data.html#dataloader-collate-fn.
"""
data = OrderedDict()
for key in ("inputs", "outputs", "labels"):
if len(list_samples) == 0 or getattr(list_samples[-1], key) is None:
data[key] = None
else:
if self.modalities is None or len(self.modalities) == 1 or key == "labels":
if key == "inputs" and self.features_to_add is not None:
input_ = torch.stack([torch.as_tensor(getattr(s, key)[0], dtype=torch.float) for s in list_samples], dim=0)
features = torch.stack([torch.as_tensor(getattr(s, key)[1], dtype=torch.float) for s in list_samples], dim=0)
data[key] = ListTensors(input_, features)
else:
data[key] = torch.stack([torch.as_tensor(getattr(s, key), dtype=torch.float).clone() for s in list_samples], dim=0)
else:
data[key] = {m: [(torch.as_tensor(getattr(sample, key)[m], dtype=torch.float32)
if getattr(sample, key)[m] is not None else None) for sample in list_samples]
for m in self.modalities}
if data["labels"] is not None:
data["labels"] = data["labels"].type(torch.LongTensor)
return DataItem(**data)
def get_dataloader(self, train=False, validation=False, test=False,
fold_index=0, **kwargs):
""" Generate a pytorch DataLoader.
Parameters
----------
train: bool, default False
return the dataloader over the train set.
validation: bool, default False
return the dataloader over the validation set.
test: bool, default False
return the dataloader over the test set.
fold_index: int, default 0
the index of the fold to use for the training
Returns
-------
loaders: list of DataLoader
the requested data loaders.
"""
_test, _train, _validation, sampler = (None, None, None, None)
if test:
_test = DataLoader(
self.dataset["test"], batch_size=self.batch_size,
collate_fn=self.collate_fn, **self.data_loader_kwargs)
if train:
if self.sampler == "weighted_random":
indices = self.dataset["train"][fold_index].indices
samples_weigths = self.sampler_weigths[np.array(self.stratify_label[indices], dtype=np.int32)]
sampler = WeightedRandomSampler(samples_weigths, len(indices), replacement=True)
elif self.sampler == "random":
sampler = RandomSampler(self.dataset["train"][fold_index])
elif self.sampler == "sequential":
sampler = SequentialSampler(self.dataset["train"][fold_index])
_train = DataLoader(
self.dataset["train"][fold_index], batch_size=self.batch_size, sampler=sampler,
collate_fn=self.collate_fn, **self.data_loader_kwargs)
if validation:
_validation = DataLoader(
self.dataset["validation"][fold_index],
batch_size=self.batch_size, collate_fn=self.collate_fn,
**self.data_loader_kwargs)
return SetItem(test=_test, train=_train, validation=_validation)
@staticmethod
def get_mask(df, projection_labels=None, check_nan=None):
""" Filter a table.
Parameters
----------
df: a pandas DataFrame
a table data.
projection_labels: dict, default None
selects only the data that match the conditions in the dict
{<column_name>: <value>}.
check_nan: list of str, default None
check if there is nan in the selected columns. Select only the rows without nan
Returns
-------
mask: a list of boolean values
"""
mask = np.ones(len(df), dtype=bool)
if projection_labels is not None:
for (col, val) in projection_labels.items():
if isinstance(val, list):
mask &= getattr(df, col).isin(val)
elif val is not None:
mask &= getattr(df, col).eq(val)
if check_nan is not None:
for col in check_nan:
mask &= ~getattr(df, col).isna()
return mask
def dump_augmented_data(self, N_per_class, output_path, output_path_df):
## It takes all the dataset and computes, for each class sample, transformations that preserve the class
# distribution as homogeneously as possible
def apply_transforms(obj, tfs):
obj_tf = obj
for tf in tfs:
obj_tf = tf(obj)
return obj_tf
# First, get the mask to consider only relevant data for our application
df = pd.read_csv(self.metadata_path, sep="\t")
mask = DataManager.get_mask(df=df, projection_labels=self.projection_labels)
labels_mapping = {l: apply_transforms(l, self.labels_transforms) for l in set(self.labels[mask])}
class_repartition = [0 for _ in range(len(set(labels_mapping.values())))] # len == nb of classes
for i, label in enumerate(self.labels[mask]):
label = labels_mapping[label]
class_repartition[label] += 1
n_classes = len(class_repartition)
if isinstance(N_per_class, int):
N_per_class = [N_per_class for _ in class_repartition]
elif isinstance(N_per_class, list):
assert len(N_per_class) == n_classes
missing_samples_per_class = [N_per_class[i] for i in range(n_classes)]
adding_samples_per_class = [(missing_samples_per_class[i]-1)//class_repartition[i] + 1
if missing_samples_per_class[i] > 0 else 0 for i in range(n_classes)]
len_X_augmented = np.sum(missing_samples_per_class)
X_to_dump = np.memmap(output_path, dtype='float32', mode='w+', shape=(len_X_augmented,)+self.inputs[0].shape)
df_to_dump = np.zeros(shape=(len_X_augmented, len(df.columns)), dtype=object)
# For each class, add the missing samples with the data_augmentation_transforms
count = 0
pbar = tqdm(total=np.sum(mask), desc="Input images processed")
for i in DataManager.get_indices_from_mask(mask):
pbar.update()
sample = self.inputs[i]
label = labels_mapping[self.labels[i]]
if missing_samples_per_class[label] > 0:
for j in range(adding_samples_per_class[label]):
if missing_samples_per_class[label] > 0:
x_transformed = sample
for tf in self.data_augmentation:
x_transformed = tf(x_transformed)
X_to_dump[count] = x_transformed
df_to_dump[count] = df.values[i]
count += 1
missing_samples_per_class[label] -= 1
df_to_dump = | pd.DataFrame(df_to_dump, columns=df.columns) | pandas.DataFrame |
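The truncated end of `dump_augmented_data` wraps an object ndarray, filled row by row with metadata values, back into a DataFrame that keeps the original columns. The sketch below reproduces that wrap-up on toy data; the example frame and the `to_csv` call are assumptions, not part of the original module.

import numpy as np
import pandas as pd

df_meta = pd.DataFrame({'participant_id': ['s1', 's2'], 'age': [34, 51]})
rows = np.zeros(shape=(2, len(df_meta.columns)), dtype=object)
rows[0] = df_meta.values[0]   # copy the metadata row of each augmented sample
rows[1] = df_meta.values[1]
df_dumped = pd.DataFrame(rows, columns=df_meta.columns)
# df_dumped.to_csv('augmented_metadata.csv', sep='\t', index=False)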
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).
__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',
'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',
'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',
'query_orchestrator']
# Cell
import pandas as pd
from tqdm import tqdm
from warnings import warn
from requests.models import Response
from . import utils, raw
# Cell
def retry_request(raw, method, kwargs, n_attempts=3):
attempts = 0
success = False
while (attempts < n_attempts) and (success == False):
try:
r = getattr(raw, method)(**kwargs)
utils.check_status(r)
success = True
except Exception as e:
attempts += 1
if attempts == n_attempts:
raise e
return r
def if_possible_parse_local_datetime(df):
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']
dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]
sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]
if len(dt_cols)==1 and len(sp_cols)==1:
df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])
return df
def SP_and_date_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)
date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]
for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):
kwargs.update({
kwargs_map['date']: datetime.strftime('%Y-%m-%d'),
kwargs_map['SP']: SP,
})
missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_SP = utils.parse_xml_response(r)
df = pd.concat([df, df_SP])
df = utils.expand_cols(df)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def handle_capping(
r: Response,
df: pd.DataFrame,
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
end_date: str,
request_type: str,
**kwargs
):
capping_applied = utils.check_capping(r)
assert capping_applied is not None, 'No information on whether or not capping limits had been breached could be found in the response metadata'
if capping_applied == True: # only subset of date range returned
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']
dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]
if len(dt_cols) == 1:
start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')
if 'start_time' in kwargs.keys():
kwargs['start_time'] = '00:00'
if pd.to_datetime(start_date) >= pd.to_datetime(end_date):
warn(f'The `end_date` ({end_date}) was earlier than `start_date` ({start_date})\nThe `start_date` will be set one day earlier than the `end_date`.')
start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
warn(f'Response was capped, request is rerunning for missing data from {start_date}')
df_rerun = date_range_request(
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
start_date=start_date,
end_date=end_date,
request_type=request_type,
**kwargs
)
df = pd.concat([df, df_rerun])
df = df.drop_duplicates()
else:
warn(f'Response was capped: a new `start_date` to continue requesting could not be determined automatically, please handle manually for `{method}`')
return df
def date_range_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
request_type: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
for kwarg in ['start_time', 'end_time']:
if kwarg not in kwargs_map.keys():
kwargs_map[kwarg] = kwarg
kwargs[kwargs_map['start_date']], kwargs[kwargs_map['start_time']] = pd.to_datetime(start_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
kwargs[kwargs_map['end_date']], kwargs[kwargs_map['end_time']] = pd.to_datetime(end_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
if 'SP' in kwargs_map.keys():
kwargs[kwargs_map['SP']] = '*'
func_params.remove('SP')
func_params += [kwargs_map['SP']]
missing_kwargs = list(set(func_params) - set(['start_date', 'end_date', 'start_time', 'end_time'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
if request_type == 'date_range':
kwargs.pop(kwargs_map['start_time'])
kwargs.pop(kwargs_map['end_time'])
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df = utils.parse_xml_response(r)
df = if_possible_parse_local_datetime(df)
# Handling capping
df = handle_capping(
r,
df,
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
end_date=end_date,
request_type=request_type,
**kwargs
)
return df
# Cell
def year_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
start_year = int(pd.to_datetime(start_date).strftime('%Y'))
end_year = int(pd.to_datetime(end_date).strftime('%Y'))
for year in tqdm(range(start_year, end_year+1), desc=stream):
kwargs.update({kwargs_map['year']: year})
missing_kwargs = list(set(func_params) - set(['year'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = | pd.concat([df, df_year]) | pandas.concat |
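The year loop above accumulates one parsed frame per requested year, and its completion targets `pandas.concat`. A minimal sketch of that accumulate-with-concat pattern follows; the toy frames and the final `reset_index` are illustrative assumptions.

import pandas as pd

df_all = pd.DataFrame()
for year in (2019, 2020):
    df_year_demo = pd.DataFrame({'year': [year], 'value': [year * 1.5]})
    df_all = pd.concat([df_all, df_year_demo])
df_all = df_all.reset_index(drop=True)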
# Copyright (C) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Models for the data being analysed and manipulated.
@author: drusk
"""
import random as rand
import numpy as np
import pandas as pd
from pml.utils import plotting, pandas_util
from pml.utils.errors import InconsistentSampleIdError
from pml.utils.errors import UnlabelledDataSetError
class DataSet(object):
"""
A collection of data that may be analysed and manipulated.
Columns are interpreted as features in the data set, and rows are samples
or observations.
"""
def __init__(self, data, labels=None):
"""
Creates a new DataSet from data of an unknown type. If data is itself
a DataSet object, then its contents are copied and a new DataSet is
created from the copies.
Args:
data:
Data of unknown type. The supported types are:
1) pandas DataFrame
2) Python lists
3) numpy array
4) an existing DataSet object
labels: pandas Series, Python list or Python dictionary
The classification labels for the samples in data. If they are
not known (i.e. it is an unlabelled data set) the value None
should be used. Default value is None (unlabelled).
Raises:
ValueError if the data or labels are not of a supported type.
InconsistentSampleIdError if labels were provided whose sample ids
do not match those of the data.
"""
if isinstance(data, pd.DataFrame):
self._dataframe = data
elif isinstance(data, list):
self._dataframe = pd.DataFrame(data)
elif isinstance(data, np.ndarray):
self._dataframe = pd.DataFrame(data)
elif isinstance(data, DataSet):
self._dataframe = data._dataframe.copy()
else:
raise ValueError("Unsupported representation of data set")
if isinstance(labels, list) or isinstance(labels, dict):
self.labels = pd.Series(labels)
elif isinstance(labels, pd.Series) or labels is None:
self.labels = labels
else:
raise ValueError("Unsupported representation of labels")
if (self.labels is not None and
not (self.labels.index == self._dataframe.index).all()):
raise InconsistentSampleIdError(("The sample ids for the data "
"and the labels do not match."))
def __str__(self):
"""
Returns:
This object's string representation, primarily for debugging
purposes.
"""
return self.__repr__()
def __repr__(self):
"""
This gets called when the object's name is typed into IPython on its
own line, causing a string representation of the object to be
displayed.
Returns:
This object's string representation, providing some summary
information about it to the user.
"""
def display(boolean):
return "yes" if boolean else "no"
return "\n".join(("Features: %s" % self.feature_list(),
"Samples: %d" % self.num_samples(),
"Missing values? %s"
% display(self.has_missing_values()),
"Labelled? %s" % display(self.is_labelled())))
def copy(self):
"""
Creates a copy of this dataset. Changes made to one dataset will not
affect the other.
Returns:
A new DataSet with the current data and labels.
"""
def copy_if_not_none(copyable):
return copyable.copy() if copyable is not None else None
return DataSet(self._dataframe.copy(),
labels=copy_if_not_none(self.labels))
def get_data_frame(self):
"""
Retrieve the DataSet's underlying data as a pandas DataFrame object.
See also get_labelled_data_frame().
Returns:
A pandas DataFrame with the DataSet's main data, but no labels.
"""
return self._dataframe
def get_labelled_data_frame(self):
"""
Retrieve the DataSet's underlying data as a pandas DataFrame object,
including any labels.
See also get_data_frame().
Returns:
A pandas DataFrame with the DataSet's main data and the labels if
they are present attached as the rightmost column.
"""
if not self.is_labelled():
return self.get_data_frame()
return pd.concat([self.get_data_frame(), pd.DataFrame(self.labels)],
axis=1)
def num_samples(self):
"""
Returns:
The number of samples (rows) in the data set.
"""
return self._dataframe.shape[0]
def num_features(self):
"""
Returns:
The number of features (columns) in the data set.
"""
return self._dataframe.shape[1]
def is_labelled(self):
"""
Returns:
True if the dataset has classification labels for each sample,
False otherwise.
"""
return self.labels is not None
def has_missing_values(self):
"""
Returns:
True if the dataset is missing values. These will be represented
as np.NaN.
"""
# isnull returns booleans for each data point (True if null). The
# first any checks columns for any True, producing a 1d array of
# booleans. The second any checks that 1d array.
return pd.isnull(self._dataframe).any().any()
def feature_list(self):
"""
Returns:
The list of features in the dataset.
"""
return self._dataframe.columns.tolist()
def get_sample_ids(self):
"""
Returns:
A Python list of the ids of the samples in the dataset.
"""
return self._get_sample_ids_index().tolist()
def _get_sample_ids_index(self):
"""
Returns:
A pandas Index object containing the sample ids of the data set.
"""
return self.get_data_frame().index
def get_labels(self, indices=None):
"""
Selects classification labels for the specified samples (rows) in the
DataSet.
Args:
indices: list
The list of row indices (0 based) which should be selected.
Defaults to None, in which case all labels are selected.
Returns:
A pandas Series with the classification labels.
"""
if indices is None:
return self.labels
else:
return self.labels.take(indices)
def get_label_set(self):
"""
Returns the set of all labels in the DataSet.
Returns:
label_set: set
"""
if self.labels is None:
return set()
else:
return set(self.labels)
def get_feature_values(self, feature):
"""
Retrieves the set of values for a given feature.
Args:
feature: string
The feature whose unique values will be retrieved.
Returns:
value_set: set
The set of unique values for a feature.
"""
return set(self.get_feature_value_counts(feature).index)
def get_feature_value_counts(self, feature):
"""
Count the number of occurrences of each value of a given feature in
the data set.
Args:
feature: string
The feature whose values will be counted.
Returns:
value_counts: pandas.Series
A Series containing the counts of each label. It is indexable by
label. The index is ordered from highest to lowest count.
"""
return self.get_column(feature).value_counts()
def get_label_value_counts(self):
"""
Count the number of occurrences of each label.
NOTE: If the data set is unlabelled an empty set of results will be
returned.
Returns:
value_counts: pandas.Series
A Series containing the counts of each label. It is indexable by
label. The index is ordered from highest to lowest count.
"""
if self.is_labelled():
return self.labels.value_counts()
else:
return pd.Series() # blank result
def reduce_rows(self, function):
"""
Performs a row-wise reduction of the data set.
Args:
function:
the function which will be applied to each row in the data set.
Returns:
a pandas Series object which is the one dimensional result of
reduction (one value corresponding to each row).
"""
return self._dataframe.apply(function, axis=1)
def reduce_features(self, function):
"""
Performs a feature-wise (i.e. column-wise) reduction of the data set.
Args:
function:
The function which will be applied to each feature in the data set.
Returns:
A pandas Series object which is the one dimensional result of the
reduction (one value corresponding to each feature).
"""
return self._dataframe.apply(function, axis=0)
def _get_filtered_labels_if_exist(self, indices):
"""
Internal method used to filter the data set's labels if there are any.
Args:
indices:
The indices of the labels to keep.
Returns:
labels:
If the data set is labelled, this will be the labels at the
specified indices. If the data set is unlabelled, None will
be returned.
"""
return self.labels[indices] if self.is_labelled() else None
def sample_filter(self, samples_to_keep):
"""
Filters the data set based on its sample ids.
Args:
samples_to_keep:
The sample ids of the samples which should be kept. All others
will be removed.
Returns:
filtered: model.DataSet
The filtered data set.
"""
return DataSet(self._dataframe.ix[samples_to_keep],
self._get_filtered_labels_if_exist(samples_to_keep))
def value_filter(self, feature, values):
"""
Filters the data set based on its values for a given feature.
Args:
feature: string
The name of the feature whose value will be examined for each
sample.
values: single value or list of values.
Samples passing through the filter must have one of these
values for the specified feature.
Returns:
filtered: model.DataSet
The filtered data set.
"""
samples = pandas_util.find(self.get_column(feature), values)
return self.sample_filter(samples)
def label_filter(self, labels):
"""
Filters the data set based on its labels.
Args:
labels: single value or list of values
Samples with one of these labels will remain in the filtered data
set. All others will be removed.
Returns:
filtered: model.DataSet
The filtered data set.
Raises:
UnlabelledDataSetError if the data set is not labeled.
"""
if not self.is_labelled():
raise UnlabelledDataSetError()
return self.sample_filter(pandas_util.find(self.labels, labels))
def drop_column(self, index):
"""
Creates a copy of the data set with a specified column removed.
Args:
index:
the index (0 based) of the column to drop.
Returns:
a new DataSet with the specified column removed. The original
DataSet remains unaltered.
"""
return DataSet(self._dataframe.drop(index, axis=1),
labels=self.labels)
def drop_empty_samples(self):
"""
Creates a copy of the data set with any samples (rows) that had no
value for any feature removed.
Returns:
filtered: DataSet
A new DataSet with empty samples removed. The original DataSet
is unaltered.
"""
def all_null(row):
return | pd.isnull(row) | pandas.isnull |
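The truncated `all_null` helper inside `drop_empty_samples` relies on a row-wise `pandas.isnull` test. The sketch below shows that test on a toy frame together with the `.all()` reduction such a helper feeds into; the example data is an assumption.

import numpy as np
import pandas as pd

frame = pd.DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=['f1', 'f2'])
# Mark rows whose every feature value is missing.
entirely_empty = frame.apply(lambda row: pd.isnull(row).all(), axis=1)
# entirely_empty -> [False, True]; keep only the rows that carry data:
filtered = frame[~entirely_empty]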
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
    'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
    'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
     'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
     'mw days/therm', 'mw days (thermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized,
# Solar and Solar Project were not classified as these do not indicate if they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
    'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
    'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
    'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
    'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
    'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_fuel strings to create
# a dictionary of plant fuel lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_fuel
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Institute with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
    'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
    'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Making a dictionary of lists from the lists of construction_type strings to
# create a dictionary of construction type lists.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (without the .DBF extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
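"""tuple: A tuple containing the FERC Form 714 tables that can be integrated
into PUDL.
"""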
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
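"""tuple: A tuple containing the EIA 861 tables that can be integrated into
PUDL.
"""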
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA's internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIAโs internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in either short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
    'T': 'Tolling Agreement - Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables and associated
information for reading those tables into PUDL (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of names for the glue tables that link EIA and FERC plant
and utility entities together within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary of table names (keys) and tuples of column names (values)
identifying integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'balancing_authority_code': pd.StringDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'contact_firstname': pd.StringDtype(),
'contact_firstname2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'current_planned_operating_date': 'datetime64[ns]',
'deliver_power_transgrid': pd.BooleanDtype(),
'duct_burners': pd.BooleanDtype(),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.StringDtype(),
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'longitude': float,
'mercury_content_ppm': float,
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.StringDtype(),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'original_planned_operating_date': 'datetime64[ns]',
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'other_combustion_tech': pd.BooleanDtype(),
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': | pd.StringDtype() | pandas.StringDtype |
import pandas as pd
from uhxl import UhExcelFile
DATA = "tests/data/test_merged_cell.xlsx"
FILE = UhExcelFile(DATA)
def test_merged_cell():
df = pd.read_excel(FILE)
assert isinstance(df, pd.DataFrame)
assert df.equals(pd.DataFrame({"merged": ["col1", "a"], None: ["col2", "b"]}))
def test_merged_cell_multi_columns():
df = | pd.read_excel(FILE, header=(0, 1)) | pandas.read_excel |
from re import findall
from pandas import Series
from omics import get_ome_regexp, get_omics_regexp
ome_re = get_ome_regexp()
omics_re = get_omics_regexp()
def test_ome_re():
assert findall(ome_re, 'genome') == ['genome']
assert findall(ome_re, '(genome') == ['genome']
assert findall(ome_re, 'genome proteome') == ['genome', 'proteome']
assert findall(ome_re, 'whole-exome') == ['whole-exome']
assert findall(ome_re, 'we highlight genome-proteome interactions') == ['genome', 'proteome']
assert findall(ome_re, 'microbiome, proteome, metabolome.') == ['microbiome', 'proteome', 'metabolome']
assert findall(ome_re, 'www.mycancergenome.org') == []
assert findall(ome_re, 'cancergenome.nih.gov') == []
assert findall(ome_re, 'cancergenome.nih/gov') == []
assert findall(ome_re, 'metabolome: ') == ['metabolome']
assert findall(ome_re, 'metabolome; ') == ['metabolome']
def test_omics_re():
assert findall(omics_re, 'omic') == []
assert findall(omics_re, 'genomics') == ['genomic']
assert findall(omics_re, ' genomics') == ['genomic']
assert findall(omics_re, 'genomics ') == ['genomic']
assert findall(omics_re, ' genomics ') == ['genomic']
assert findall(omics_re, 'prote-omic') == ['prote-omic']
assert findall(omics_re, 'prote-omics') == ['prote-omic']
# we do not want to count post-genomics as a reference to genomics
# as such a reference is likely to occur in some proteomic papers
assert findall(omics_re, 'post-genomics') == ['post-genomic']
assert findall(omics_re, 'transcriptomic proteomic') == ['transcriptomic', 'proteomic']
assert findall(omics_re, 'transcriptomic-proteomic') == ['transcriptomic', 'proteomic']
assert findall(omics_re, 'transcriptomic proteomic') == ['transcriptomic', 'proteomic']
assert list( | Series(['transcriptomic proteomic']) | pandas.Series |
import yfinance as yf
import matplotlib.pyplot as plt
import collections
import pandas as pd
import numpy as np
import cvxpy as cp
import efficient_frontier
import param_estimator
import backtest
import objective_functions
def port_opt(stock_picks, weight_constraints, control, trade_horizon, cardinality, target_return, risk_aversion):
selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
if cardinality >= 20:
selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
num_stocks = len(stock_picks)
train_start, train_end = '2016-12-01', '2021-11-30'
etf_table = 'americanetfs'
etf_tickers = selected_etfs
etf_returns_by_tick = []
for tick in etf_tickers:
returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
if returns.empty:
continue
returns[tick] = returns['adj_close']
etf_returns_by_tick += [returns[[tick]]]
etf_returns = pd.concat(etf_returns_by_tick, axis=1).T.dropna()
train_etf_returns = etf_returns.T
etf_table = 'spy'
print(stock_picks)
stock_returns_by_tick = []
for tick in stock_picks:
returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
if returns.empty:
continue
returns[tick] = returns['adj_close']
stock_returns_by_tick += [returns[[tick]]]
stock_returns = pd.concat(stock_returns_by_tick, axis=1).T.dropna()
train_stock_returns = stock_returns.T
# Fama-French factors
train_factors = param_estimator.get_factors(start=int(train_start[0:4] + train_start[5:7]),
end=int(train_end[0:4] + train_end[5:7]), freq='monthly')
asset_universe = stock_picks + selected_etfs
train_returns = | pd.concat([train_stock_returns, train_etf_returns], axis=1) | pandas.concat |
import math
import os
import pathlib
from functools import reduce
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from experiment_definitions import ExperimentDefinitions
from data_collectors import MemtierCollector, MiddlewareCollector
class PlottingFunctions:
@staticmethod
def lineplot(dataframe, experiment_title, save_as_filename,
x=None, y=None, hue=None, style=None, ci='sd', err_style='band',
xlabel=None, ylabel=None, huelabel=None, stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
# markers = hue if style is None else True
# print(markers)
sns.lineplot(x, y, data=dataframe, legend="full", hue=hue, style=style, markers=True,
ci=ci, err_style='band').set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
sns.scatterplot(x, y, data=dataframe, legend=False, hue=hue, style=style,
ci=None).set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
if isinstance(xticks, tuple):
plt.xticks(xticks[0], xticks[1], rotation=45)
else:
if xticks[0] == 6 or xticks[0] == 2:
                xticks = np.insert(xticks, 0, 0)
plt.xticks(xticks, rotation=45)
if huelabel is not None or stylelabel is not None:
legend = plt.legend(bbox_to_anchor=(1, 1), loc='upper left')
for txt in legend.get_texts():
if txt.get_text() is hue and huelabel is not None:
txt.set_text(huelabel)
continue
if txt.get_text() is style and stylelabel is not None:
txt.set_text(stylelabel)
continue
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def barplot(dataframe, experiment_title, save_as_filename,
x=None, y=None, hue=None, ci='sd',
xlabel=None, ylabel=None, huelabel=None,
xlim=(None, None), ylim=(0, None),
xticks=None):
sns.barplot(x, y, hue, data=dataframe,
ci=ci, capsize=.1, errwidth=1.5).set(xlabel=xlabel, ylabel=ylabel, title=experiment_title,
xlim=xlim, ylim=ylim)
if isinstance(xticks, tuple):
plt.xticks(xticks[0], xticks[1], rotation=45)
else:
plt.xticks(xticks, rotation=45)
if huelabel is not None:
legend = plt.legend()
for txt in legend.get_texts():
if txt.get_text() is hue and huelabel is not None:
txt.set_text(huelabel)
continue
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def distplot(histogram, experiment_title, save_as_filename,
bins=200, kde=False,
xlabel=None, ylabel=None, xlim=(0, None), ylim=(0, None),
xticks=None):
sns.distplot(histogram, bins=bins, kde=kde, hist=True).set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
if xticks is not None:
plt.xticks(*xticks)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def resplot(dataframe, experiment_title, save_as_filename,
x=None, y=None,
xlabel=None, ylabel=None):
sns.residplot(x, y, dataframe).set(xlabel=xlabel, ylabel=ylabel, title=experiment_title)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def qqplot(dataframe, experiment_title, save_as_filename,
x=None, fit_line=False):
stats.probplot(dataframe[x], dist="norm", fit=fit_line, plot=plt)
plt.title(experiment_title)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def plot_throughput_by_type(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Request_Throughput', hue='RequestType', style='Worker_Threads',
ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Throughput (req/s)', huelabel='Request Type',
stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_throughput_family(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Request_Throughput', hue='Worker_Threads', style=None,
ci='sd', err_style='bars',
xlabel='Memtier Client Count', ylabel='Throughput (req/s)', huelabel='Worker Threads',
stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_response_time_by_type(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Response_Time', hue='RequestType', style='Worker_Threads',
ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Response Time (ms)', huelabel='Request Type',
stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_response_time_family(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Response_Time', hue='Worker_Threads', style=None, ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Response Time (ms)', huelabel='Worker Threads',
stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_histogram(histogram, experiment_title, save_as_filename, bins=200, kde=False,
xlabel='Buckets (ms)', ylabel='Request Count', xlim=(0, 20), ylim=(0, 35000),
xticks=None):
if xticks is None:
xticks = (np.arange(0, (bins / 10) + 0.1, step=2.5), np.linspace(0, bins / 10, 9))
PlottingFunctions.distplot(histogram, experiment_title, save_as_filename, bins, kde,
xlabel, ylabel, xlim, ylim, xticks)
class StatisticsFunctions:
@staticmethod
def get_average_and_std(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['mean', 'std']).reset_index().rename(index=str,
columns={
"mean": aggregate_on + '_Mean',
"std": aggregate_on + '_Std'})
@staticmethod
def get_sum(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['sum']).reset_index().rename(index=str, columns={"sum": aggregate_on})
@staticmethod
def get_weighted_average(dataframe, aggregate_on):
return dataframe.apply(lambda x: np.average(x[aggregate_on], weights=x['Request_Throughput'])).reset_index() \
.rename(index=str, columns={0: aggregate_on})
@staticmethod
def get_arithmetic_mean(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['mean']).reset_index().rename(index=str, columns={"mean": aggregate_on})
@staticmethod
def get_percentiles(dataframe):
return dataframe.quantile(([.01, .05, .1, .15, .2, .25, .3, .35, .4, .45, .5, .525, .55, .575, .6, .625, .65,
.675, .7, .725, .75, .775, .8, .825, .85, .875, .90, .925, .95, .975, .99, 1])).reset_index().rename(
index=str,
columns={"level_2": 'Percentile'})
@staticmethod
def get_report_percentiles(dataframe):
return dataframe.quantile(([.25, .5, .75, .90, .99])).reset_index().rename(index=str,
columns={"level_2": 'Percentile'})
@staticmethod
def mm1(summary_table, plot=False):
calculations = []
for row in summary_table.itertuples():
lamb = row[4]
muh = row[-1]
measured_response_time = row[5]
measured_queue_waiting_time = row[6]
measured_queue_size = row[8]
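            # Standard M/M/1 relations used below: with utilization
            # rho = lambda/mu, E[N] = rho/(1-rho) jobs in the system,
            # E[Nq] = rho*E[N] jobs in the queue, E[T] = (1/mu)/(1-rho)
            # response time, and E[W] = rho*E[T] waiting time.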
traffic_intensity = lamb / muh
mean_nr_jobs_in_system = traffic_intensity / (1 - traffic_intensity)
mean_nr_jobs_in_queue = traffic_intensity * mean_nr_jobs_in_system
mean_response_time = (1 / muh) / (1 - traffic_intensity)
mean_waiting_time = traffic_intensity * mean_response_time
calculations.append({'Num_Clients': row[1],
'Worker_Threads': row[2],
'Maximum_Service_Rate': muh,
'Arrival_Rate': lamb,
'Traffic_Intensity': traffic_intensity,
'Mean_Number_Jobs_System': mean_nr_jobs_in_system,
'Measured_Response_Time': measured_response_time,
'Estimated_Response_Time': mean_response_time * 1000,
'Measured_Queue_Waiting_Time': measured_queue_waiting_time,
'Estimated_Queue_Waiting_Time': mean_waiting_time * 1000,
'Measured_Queue_Size': measured_queue_size,
'Estimated_Queue_Size': mean_nr_jobs_in_queue})
mm1_analysis = pd.DataFrame(calculations)
mm1_analysis = mm1_analysis[['Num_Clients', 'Worker_Threads', 'Maximum_Service_Rate', 'Arrival_Rate',
'Traffic_Intensity', 'Mean_Number_Jobs_System', 'Measured_Response_Time',
'Estimated_Response_Time', 'Measured_Queue_Waiting_Time',
'Estimated_Queue_Waiting_Time', 'Measured_Queue_Size', 'Estimated_Queue_Size']]
return mm1_analysis
@staticmethod
def mmm(summary_table, plot=False):
calculations = []
for row in summary_table.itertuples():
lamb = row[4]
servers = row[2] * 2
muh = row[-1] / servers
measured_response_time = row[5]
measured_queue_waiting_time = row[6]
measured_queue_size = row[8]
traffic_intensity = lamb / (muh * servers)
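            # M/M/m (Erlang C) relations used below: with m servers and
            # rho = lambda/(m*mu), P0 is the probability of an empty system,
            # the Erlang C term P0*_param1 is the probability an arrival must
            # queue, Lq = rho*P_queue/(1-rho), W = Lq/lambda (Little's law),
            # and T = (1/mu)*(1 + P_queue/(m*(1-rho))).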
_param1 = math.pow(servers * traffic_intensity, servers) / (
math.factorial(servers) * (1 - traffic_intensity))
probability_zero_jobs_in_system = 1 / (1 + _param1 +
sum([pow(servers * traffic_intensity, n) / math.factorial(n) for n in
range(1, servers)]))
probability_of_queueing = probability_zero_jobs_in_system * _param1
mean_number_jobs_in_queue = (traffic_intensity * probability_of_queueing) / (1 - traffic_intensity)
mean_number_jobs_in_system = servers * traffic_intensity + mean_number_jobs_in_queue
average_utilization_each_server = traffic_intensity
mean_response_time = (1 / muh) * (1 + probability_of_queueing / (servers * (1 - traffic_intensity)))
mean_waiting_time = mean_number_jobs_in_queue / lamb
calculations.append({'Num_Clients': row[1],
'Worker_Threads': row[2],
'Maximum_Service_Rate': muh,
'Arrival_Rate': lamb,
'Traffic_Intensity': traffic_intensity,
'Mean_Number_Jobs_System': mean_number_jobs_in_system,
'Measured_Response_Time': measured_response_time,
'Estimated_Response_Time': mean_response_time * 1000,
'Measured_Queue_Waiting_Time': measured_queue_waiting_time,
'Estimated_Queue_Waiting_Time': mean_waiting_time * 1000,
'Measured_Queue_Size': measured_queue_size,
'Estimated_Queue_Size': mean_number_jobs_in_queue,
'Probability_Zero_Jobs_System': probability_zero_jobs_in_system,
'Probability_Queueing': probability_of_queueing,
'Mean_Average_Utilization_Each_Server': average_utilization_each_server})
mmm_analysis = pd.DataFrame(calculations)
mmm_analysis = mmm_analysis[['Num_Clients', 'Worker_Threads', 'Maximum_Service_Rate', 'Arrival_Rate',
'Traffic_Intensity', 'Mean_Number_Jobs_System', 'Measured_Response_Time',
'Estimated_Response_Time', 'Measured_Queue_Waiting_Time',
'Estimated_Queue_Waiting_Time', 'Measured_Queue_Size', 'Estimated_Queue_Size',
'Probability_Zero_Jobs_System', 'Probability_Queueing',
'Mean_Average_Utilization_Each_Server']]
return mmm_analysis
class ExperimentPlotter:
@staticmethod
def save_figure(save_as_filename):
current_dir = pathlib.Path(__file__).parent
figure_path = current_dir.joinpath("figures")
if not os.path.exists(figure_path):
os.makedirs(figure_path)
figure_path = figure_path.joinpath(save_as_filename + ".png")
plt.savefig(figure_path, dpi=150, bbox_inches='tight')
plt.close()
@staticmethod
def memtier_experiment(experiment_definition, histogram=False):
memtier_collector = MemtierCollector(experiment_definition)
memtier_collector.generate_dataframe(histogram)
return [[memtier_collector.dataframe_set, memtier_collector.dataframe_get],
[memtier_collector.dataframe_histogram_set, memtier_collector.dataframe_histogram_get]]
@staticmethod
def middleware_experiment(experiment_definition, histogram=False):
middleware_collector = MiddlewareCollector(experiment_definition)
middleware_collector.generate_dataframe(histogram)
return [[middleware_collector.dataframe_set, middleware_collector.dataframe_get],
[middleware_collector.dataframe_histogram_set, middleware_collector.dataframe_histogram_get]]
@staticmethod
def memtier_statistics_get_set(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
set_group = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_set = StatisticsFunctions.get_sum(set_group, 'Request_Throughput')
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Response_Time')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
if plot:
concatenated_throughput = pd.concat([throughput_set.assign(RequestType='SET'),
throughput_get.assign(RequestType='GET')])
concatenated_response_time = pd.concat([response_time_set.assign(RequestType='SET'),
response_time_get.assign(RequestType='GET')])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[
concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mt_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mt_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_response-time-il', ylim=response_time_y)
response_time_set = pd.merge(throughput_set, response_time_set)
response_time_get = pd.merge(throughput_get, response_time_get)
hits_get = pd.merge(throughput_get, hits_get)
misses_get = pd.merge(throughput_get, misses_get)
plotted_throughput_set = throughput_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_throughput_get = throughput_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_set = response_time_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_get = response_time_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Hits')
misses_get = StatisticsFunctions.get_weighted_average(
misses_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Misses')
throughput_set_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_set, 'Request_Throughput')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get, 'Request_Throughput')
response_time_set_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_set, 'Response_Time')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get, 'Response_Time')
set_table_list = [throughput_set_plotted, response_time_set_plotted]
get_table_list = [throughput_get_plotted, response_time_get_plotted, misses_get, hits_get]
set_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']), set_table_list)
get_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']), get_table_list)
print(exp_name + " SET:")
print(set_summary)
print("====================\n")
print(exp_name + " GET:")
print(get_summary)
print("====================\n")
return [set_summary, get_summary]
@staticmethod
def memtier_statistics_request_family(flattened, subexperiment, r_type='SET', plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
family = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_family = StatisticsFunctions.get_sum(family, 'Request_Throughput')
response_time_family = StatisticsFunctions.get_weighted_average(family, 'Response_Time')
if plot:
concatenated_throughput = pd.concat([throughput_family.assign(RequestType=r_type)])
concatenated_response_time = pd.concat([response_time_family.assign(RequestType=r_type)])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mt_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mt_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_response-time-il', ylim=response_time_y)
response_time_family = pd.merge(throughput_family, response_time_family)
plotted_throughput_family = throughput_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_family = response_time_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
throughput_family_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_family,
'Request_Throughput')
response_time_family_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_family,
'Response_Time')
family_table_list = [throughput_family_plotted, response_time_family_plotted]
family_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']),
family_table_list)
print(exp_name + " " + r_type + ":")
print(family_summary)
print("====================\n")
return family_summary
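    # Usage sketch (illustrative only): the enclosing class name is not visible in this
    # excerpt, so `ResultAnalyzer` below is a placeholder. `flattened` is assumed to be the
    # list of per-request-type DataFrames produced by the upstream log parser, and
    # `subexperiment` a dict with 'experiment_id'/'subexperiment_id' keys as used above.
    #
    #   subexp = {'experiment_id': 3, 'subexperiment_id': 1}
    #   set_summary = ResultAnalyzer.memtier_statistics_request_family(
    #       flattened, subexp, r_type='SET', plot=False)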
@staticmethod
def memtier_statistics_multiget(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
if subexperiment['subexperiment_id'] == 2:
req_types = 'Non-sharded MultiGET'
type_to_number_dict = {
"MULTIGET_1": 1,
"MULTIGET_3": 3,
"MULTIGET_6": 6,
"MULTIGET_9": 9
}
else:
req_types = 'Sharded MultiGET'
type_to_number_dict = {
"SHARDED_1": 1,
"SHARDED_3": 3,
"SHARDED_6": 6,
"SHARDED_9": 9
}
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group['Type'] = get_group['Type'].replace(type_to_number_dict, regex=True)
        get_group['Type'] = pd.to_numeric(get_group['Type'])
get_group = get_group.groupby(['Type', 'Repetition', 'Worker_Threads'])
summed_get_throughput = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
average_get_response_time = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
concatenated_throughput = pd.concat([summed_get_throughput.assign(RequestType='GET')])
concatenated_response_time = pd.concat([average_get_response_time.assign(RequestType='GET')])
if plot:
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.lineplot(concatenated_throughput, exp_name, plot_base + 'mt_throughput', x='Type',
y='Request_Throughput',
xlabel=req_types, ylabel='Throughput (req/s)',
xlim=(0, None), ylim=throughput_y, xticks=[1, 3, 6, 9])
PlottingFunctions.lineplot(concatenated_response_time, exp_name, plot_base + 'mt_response-time', x='Type',
y='Response_Time',
xlabel=req_types, ylabel='Response Time (ms)',
xlim=(0, None), ylim=response_time_y, xticks=[1, 3, 6, 9])
average_get_response_time = pd.merge(summed_get_throughput, average_get_response_time)
hits_get = pd.merge(summed_get_throughput, hits_get)
misses_get = pd.merge(summed_get_throughput, misses_get)
plotted_throughput_get = summed_get_throughput.groupby(['Type'])
plotted_response_time_get = average_get_response_time.groupby(['Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Type']), 'Hits')
misses_get = StatisticsFunctions.get_weighted_average(misses_get.groupby(['Type']), 'Misses')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get, 'Request_Throughput')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get, 'Response_Time')
get_table_list = [throughput_get_plotted, response_time_get_plotted, misses_get, hits_get]
get_summary = reduce(lambda left, right: pd.merge(left, right, on=['Type']), get_table_list)
print(exp_name + " GET:")
print(get_summary)
print("====================\n\n")
return get_summary
@staticmethod
def middleware_statistics_get_set(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None), queue_waiting_y=(0, None),
memcached_handling_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Middleware')
set_group = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_set = StatisticsFunctions.get_sum(set_group, 'Request_Throughput')
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Response_Time')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
set_group = flattened[0][~flattened[0].Type.str.contains('Interactive')]
set_group = set_group.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group = get_group.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
queue_waiting_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Queue_Waiting_Time')
queue_waiting_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Waiting_Time')
memcached_communication_set = StatisticsFunctions.get_weighted_average(set_group, 'Memcached_Communication')
memcached_communication_get = StatisticsFunctions.get_weighted_average(get_group, 'Memcached_Communication')
queue_size_set = StatisticsFunctions.get_weighted_average(set_group, 'Queue_Size')
queue_size_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Size')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
if plot:
xticks = flattened[0]['Num_Clients'].unique()
concatenated_throughput = pd.concat([throughput_set.assign(RequestType='SET'),
throughput_get.assign(RequestType='GET')])
concatenated_response_time = pd.concat([response_time_set.assign(RequestType='SET'),
response_time_get.assign(RequestType='GET')])
concatenated_queue_waiting_time = pd.concat([queue_waiting_time_set.assign(RequestType='SET'),
queue_waiting_time_get.assign(RequestType='GET')])
concatenated_memcached_communication = pd.concat([memcached_communication_set.assign(RequestType='SET'),
memcached_communication_get.assign(RequestType='GET')])
concatenated_queue_size = pd.concat([queue_size_set.assign(RequestType='SET'),
queue_size_get.assign(RequestType='GET')])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[
concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mw_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mw_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_response-time-il', ylim=response_time_y)
PlottingFunctions.lineplot(concatenated_queue_waiting_time, exp_name, plot_base + "mw_queue-wait-time",
x='Num_Clients', y='Queue_Waiting_Time', hue='RequestType',
style='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Queue Waiting Time (ms)', huelabel='Request Type',
stylelabel='Worker Threads', xlim=(0, None), ylim=queue_waiting_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_memcached_communication, exp_name, plot_base + "mw_mc-comm-time",
x='Num_Clients', y='Memcached_Communication', hue='RequestType',
style='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Memcached Handling (ms)',
huelabel='Request Type', stylelabel='Worker Threads',
xlim=(0, None), ylim=memcached_handling_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_queue_size, exp_name, plot_base + "mw_queue-size", x='Num_Clients',
y='Queue_Size', hue='RequestType', style='Worker_Threads',
xlabel='Number Memtier Clients', ylabel='Queue Size',
huelabel='Request Type', stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None), xticks=xticks)
response_time_set = pd.merge(throughput_set, response_time_set)
        response_time_get = pd.merge(throughput_get, response_time_get)
import pandas as pd
import numpy as np
import re
import openpyxl as openpyxl
import os
from os import listdir
from pathlib import Path
import geopandas as gpd
from geopandas.tools import sjoin
import sys
import argparse
## define pathnames
dropbox_general = "/Users/euniceliu/Dropbox (Dartmouth College)/"
DROPBOX_DATA_PATH = os.path.join(dropbox_general,
"qss20_finalproj_rawdata/summerwork/")
DATA_RAW_DIR = os.path.join(DROPBOX_DATA_PATH, "raw/")
DATA_ID_DIR = os.path.join(DROPBOX_DATA_PATH, "intermediate/")
PREDICTORS_WRITEFOLDER = os.path.join(DATA_ID_DIR)
JOBS_INTERSECT_PATH = os.path.join(DATA_ID_DIR, "h2a_tract_intersections.pkl")
DF_ACS_PATH_2014 = os.path.join(DATA_ID_DIR, "acs_tract_percentage2014.pkl")
DF_ACS_PATH_2015 = os.path.join(DATA_ID_DIR, "acs_tract_percentage2015.pkl")
DF_ACS_PATH_2016 = os.path.join(DATA_ID_DIR, "acs_tract_percentage2016.pkl")
DF_ACS_PATH_2017 = os.path.join(DATA_ID_DIR, "acs_tract_percentage2017.pkl")
DF_ACS_PATH_2018 = os.path.join(DATA_ID_DIR, "acs_tract_percentage2018.pkl")
DF_ACS_PATH_2019 = os.path.join(DATA_ID_DIR, "acs_tract_percentage2019.pkl")
## read in dataset
tract_intersect_id = pd.read_pickle(JOBS_INTERSECT_PATH)
df_acs_2014 = pd.read_pickle(DF_ACS_PATH_2014)
df_acs_2015 = pd.read_pickle(DF_ACS_PATH_2015)
df_acs_2016 = pd.read_pickle(DF_ACS_PATH_2016)
df_acs_2017 = pd.read_pickle(DF_ACS_PATH_2017)
df_acs_2018 = pd.read_pickle(DF_ACS_PATH_2018)
df_acs_2019 = pd.read_pickle(DF_ACS_PATH_2019)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
date: 2021/9/28 16:02
desc: ไธๆน่ดขๅฏ็ฝ-ๆฐๆฎไธญๅฟ-็น่ฒๆฐๆฎ-ๆบๆ่ฐ็
http://data.eastmoney.com/jgdy/
ไธๆน่ดขๅฏ็ฝ-ๆฐๆฎไธญๅฟ-็น่ฒๆฐๆฎ-ๆบๆ่ฐ็ -ๆบๆ่ฐ็ ็ป่ฎก: http://data.eastmoney.com/jgdy/tj.html
ไธๆน่ดขๅฏ็ฝ-ๆฐๆฎไธญๅฟ-็น่ฒๆฐๆฎ-ๆบๆ่ฐ็ -ๆบๆ่ฐ็ ่ฏฆ็ป: http://data.eastmoney.com/jgdy/xx.html
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_em_jgdy_tj(start_date: str = "20180928") -> pd.DataFrame:
"""
ไธๆน่ดขๅฏ็ฝ-ๆฐๆฎไธญๅฟ-็น่ฒๆฐๆฎ-ๆบๆ่ฐ็ -ๆบๆ่ฐ็ ็ป่ฎก
http://data.eastmoney.com/jgdy/tj.html
:param start_date: ๅผๅงๆถ้ด
:type start_date: str
:return: ๆบๆ่ฐ็ ็ป่ฎก
:rtype: pandas.DataFrame
"""
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'NOTICE_DATE,SUM,RECEIVE_START_DATE,SECURITY_CODE',
'sortTypes': '-1,-1,-1,1',
'pageSize': '500',
'pageNumber': '1',
'reportName': 'RPT_ORG_SURVEYNEW',
'columns': 'ALL',
'quoteColumns': 'f2~01~SECURITY_CODE~CLOSE_PRICE,f3~01~SECURITY_CODE~CHANGE_RATE',
'source': 'WEB',
'client': 'WEB',
'filter': f"""(NUMBERNEW="1")(IS_SOURCE="1")(RECEIVE_START_DATE>'{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')"""
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = big_df.append(temp_df)
big_df.reset_index(inplace=True)
big_df["index"] = list(range(1, len(big_df) + 1))
    big_df.columns = [
        "序号",
        "_",
        "代码",
        "名称",
        "_",
        "公告日期",
        "接待日期",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "接待地点",
        "_",
        "接待方式",
        "_",
        "接待人员",
        "_",
        "_",
        "_",
        "_",
        "_",
        "接待机构数量",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "涨跌幅",
        "最新价",
    ]
    big_df = big_df[
        [
            "序号",
            "代码",
            "名称",
            "最新价",
            "涨跌幅",
            "接待机构数量",
            "接待方式",
            "接待人员",
            "接待地点",
            "接待日期",
            "公告日期",
        ]
    ]
    big_df['最新价'] = pd.to_numeric(big_df['最新价'], errors="coerce")
    big_df['涨跌幅'] = pd.to_numeric(big_df['涨跌幅'], errors="coerce")
    big_df['接待机构数量'] = pd.to_numeric(big_df['接待机构数量'], errors="coerce")
    big_df['接待日期'] = pd.to_datetime(big_df['接待日期']).dt.date
    big_df['公告日期'] = pd.to_datetime(big_df['公告日期']).dt.date
return big_df
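# Usage sketch (illustrative only; requires live network access to the East Money endpoint
# above, and the output keeps the Chinese column names assigned in the function):
#
#   stock_em_jgdy_tj_df = stock_em_jgdy_tj(start_date="20210101")
#   print(stock_em_jgdy_tj_df.head())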
def stock_em_jgdy_detail(start_date: str = "20180928") -> pd.DataFrame:
"""
ไธๆน่ดขๅฏ็ฝ-ๆฐๆฎไธญๅฟ-็น่ฒๆฐๆฎ-ๆบๆ่ฐ็ -ๆบๆ่ฐ็ ่ฏฆ็ป
http://data.eastmoney.com/jgdy/xx.html
:param start_date: ๅผๅงๆถ้ด
:type start_date: str
:return: ๆบๆ่ฐ็ ่ฏฆ็ป
:rtype: pandas.DataFrame
"""
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'NOTICE_DATE,RECEIVE_START_DATE,SECURITY_CODE,NUMBERNEW',
'sortTypes': '-1,-1,1,-1',
'pageSize': '50000',
'pageNumber': '1',
'reportName': 'RPT_ORG_SURVEY',
'columns': 'ALL',
'quoteColumns': 'f2~01~SECURITY_CODE~CLOSE_PRICE,f3~01~SECURITY_CODE~CHANGE_RATE',
'source': 'WEB',
'client': 'WEB',
'filter': f"""(IS_SOURCE="1")(RECEIVE_START_DATE>'{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')"""
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = big_df.append(temp_df)
big_df.reset_index(inplace=True)
big_df["index"] = list(range(1, len(big_df) + 1))
    big_df.columns = [
        "序号",
        "_",
        "代码",
        "名称",
        "_",
        "公告日期",
        "调研日期",
        "_",
        "_",
        "_",
        "调研机构",
        "_",
        "_",
        "_",
        "接待地点",
        "_",
        "接待方式",
        "调研人员",
        "接待人员",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "机构类型",
        "_",
        "_",
        "_",
        "_",
        "_",
        "最新价",
        "涨跌幅",
    ]
    big_df = big_df[
        [
            "序号",
            "代码",
            "名称",
            "最新价",
            "涨跌幅",
            "调研机构",
            "机构类型",
            "调研人员",
            "接待方式",
            "接待人员",
            "接待地点",
            "调研日期",
            "公告日期",
        ]
    ]
    big_df['最新价'] = pd.to_numeric(big_df['最新价'], errors="coerce")
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input
import plotly.express as px
import pandas as pd
import geopandas as gpd
import numpy as np
import folium
from folium.plugins import FastMarkerCluster
from datetime import date
app = dash.Dash(__name__)
def get_data():
# read the data required for this app
    d = pd.read_csv('Police_Department_Incident_Reports__2018_to_Present.csv')
from django.db.models.fields import Field
from django.http import HttpResponse
from django.utils.translation import gettext_lazy as _
from django.template.response import TemplateResponse
from django.core.exceptions import PermissionDenied
from django.urls import reverse_lazy
import csv
import urllib.parse
from .forms import ImportForm
import pandas as pd
from django.shortcuts import redirect
from django.forms import modelform_factory
from django.core.files.storage import FileSystemStorage
from os import path
import numpy as np
from deepdiff import DeepDiff
from functools import reduce, cache
from itertools import chain, filterfalse
from django.db import transaction
import logging
logging.getLogger(__name__)
class CsvExportModelMixin():
"""
This is intended to be mixed with django.contrib.admin.ModelAdmin
https://docs.djangoproject.com/en/3.1/ref/contrib/admin/#modeladmin-objects
To Export a model's data, mainly supposed to use at django admin site.
Available setting:
- file_name: name of the CSV file to Export, if not specified, the model's plural verbose name is used.
- encoding: Shift-JIS, UTF-8,...etc.
- dialect: csv.excel or csv.excel_tab. We can customize and register our own dialect.
- csv_export_fields: using to choose export fields.
- is_export_verbose_names: whether or not export verbose field names at the first row
- is_export_field_names: whether or not export field names
- fmt_params:
"""
class csv_quote_all(csv.excel):
quoting = csv.QUOTE_ALL
EXPORT_PERMISSION_CODE = 'export'
file_name = None
# encoding = 'Shift-JIS'
encoding = 'UTF-8'
# dialect = csv.excel
dialect = csv_quote_all
csv_export_fields = []
exclude_csv_export_fields = []
is_export_verbose_names = False
is_export_field_names = True
fmt_params = {}
actions = ['csv_export']
logger = logging.getLogger(__name__)
def csv_export(self, request, queryset):
opts = self.model._meta
def get_csv_export_fields():
def is_exportable(field):
return (field.concrete
and not getattr(field, 'many_to_many')
and (not self.csv_export_fields or field.name in self.csv_export_fields)
and (not self.exclude_csv_export_fields or field.name not in self.exclude_csv_export_fields)
)
return [f for f in opts.get_fields() if is_exportable(f)]
filename = self.file_name if self.file_name else urllib.parse.quote(opts.verbose_name_plural + ".csv")
logging.info(f'Exporting {opts.model_name}.')
response = HttpResponse(content_type='text/csv; encoding=%s' %(self.encoding) )
response['Content-Disposition'] = 'attachment; filename=%s' % (filename)
writer = csv.writer(response, self.dialect)
csv_field_names = [f.name for f in get_csv_export_fields()]
if self.is_export_verbose_names:
writer.writerow([opts.get_field(f).verbose_name.title() for f in csv_field_names ])
if self.is_export_field_names:
writer.writerow(csv_field_names)
for row in queryset.values_list(*csv_field_names):
writer.writerow(row)
return response
csv_export.short_description = _('CSV Export')
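# Example registration (a sketch only; `Book` and the `books` app are hypothetical and not
# part of this module). The mixin must precede admin.ModelAdmin in the bases so that its
# `actions` and export settings take effect:
#
#   from django.contrib import admin
#   from books.models import Book
#
#   @admin.register(Book)
#   class BookAdmin(CsvExportModelMixin, admin.ModelAdmin):
#       csv_export_fields = ['title', 'author', 'published_at']
#       is_export_verbose_names = True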
class CsvImportModelMixin():
"""
This is intended to be mixed with django.contrib.admin.ModelAdmin
https://docs.djangoproject.com/en/3.1/ref/contrib/admin/#modeladmin-objects
CSV import class to import a model's data, mainly supposed to use at django admin site.
Available setting:
- csv_import_fields: Field names to import, if not specified, header line is used.
- csv_excluded_fields: Field names to exclude from import.
- import_encoding: shift_jis, utf8,...etc.
- chunk_size: number of rows to be read into a dataframe at a time
- max_error_rows: maximum number of violation error rows
"""
csv_import_fields = []
csv_excluded_fields = []
unique_check_fields = ()
import_encoding = 'utf-8'
# import_encoding = 'shift_jis'
chunk_size = 10000
max_error_rows = 1000
is_skip_existing = False # True: skip imported row, False: update database with imported row
is_first_comer_priority = True # True: Inside a same chunk, first comer is saved to database. False: last was saved
change_list_template = 'admin/change_list_with_import.html'
import_template = 'admin/import.html'
IMPORT_PERMISSION_CODE = 'import'
def has_import_permission(self, request) -> bool:
"""
Returns whether a request has import permission.
"""
import_codename = self.IMPORT_PERMISSION_CODE
opts = self.model._meta
if opts.model_name.lower() == 'user' or opts.model_name.lower() == 'group':
return request.user.has_perm("%s.%s_%s" % (opts.app_label, 'add', opts.model_name))
return request.user.has_perm("%s.%s_%s" % (opts.app_label, import_codename, opts.model_name))
def changelist_view(self, request, extra_context=None):
"""
override of the ModelAdmin
"""
extra_context = extra_context or {}
extra_context['has_import_permission'] = self.has_import_permission(request)
return super(CsvImportModelMixin, self).changelist_view(request, extra_context=extra_context)
def get_urls(self):
"""
override of the ModelAdmin
"""
from django.urls import path
opts = self.model._meta
import_url = [
path('import/', self.admin_site.admin_view(self.import_action), name='%s_%s_import' % (opts.app_label, opts.model_name)),
]
return import_url + super(CsvImportModelMixin, self).get_urls()
@cache
def get_csv_import_fields(self) -> list[Field]:
def is_importable_fields(field):
return (field.concrete
and not getattr(field, 'many_to_many')
and (not self.csv_import_fields or field.name in self.csv_import_fields)
and (not self.get_csv_excluded_fields() or field.name not in self.get_csv_excluded_fields())
)
return [f for f in self.model._meta.get_fields() if is_importable_fields(f)]
@cache
def get_unique_check_fields(self) -> tuple[str]:
if self.unique_check_fields:
return self.unique_check_fields
opts = self.model._meta
if opts.unique_together:
return opts.unique_together
if opts.total_unique_constraints:
            default_unique_constraint_name = '%s_unique' % opts.model_name
            unique_constraints = [c for c in opts.total_unique_constraints if c.name == default_unique_constraint_name]
if unique_constraints:
return unique_constraints[0].fields
else:
return opts.total_unique_constraints[0].fields
return ()
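    # Example of the model-level constraint this helper looks for by default (hypothetical
    # model shown only for illustration): a UniqueConstraint named '<model_name>_unique'
    # declared in Meta.constraints, e.g. for a model called Book:
    #
    #   class Meta:
    #       constraints = [
    #           models.UniqueConstraint(fields=['code', 'valid_from'], name='book_unique'),
    #       ]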
def get_csv_excluded_fields(self) -> list[str]:
"""
Hook for excluding fields to import from csv
"""
return self.csv_excluded_fields
def get_csv_excluded_fields_init_values(self, request) -> dict:
"""
Hook for initializing excluded fields, if necessary, such as 'creator', 'created_at', 'updater'...
"""
return {}
def update_csv_excluded_fields(self, request, row) -> None:
"""
Hook for updating excluded fields, if necessary, such as 'updater', 'updated_at', and 'version'
"""
pass
def get_update_fields(self) -> list[str]:
"""
When a database rocord is duplicated with a imported row, tell which fields should be updated using the csv data.
"""
return [f.name for f in self.get_csv_import_fields() if not f.primary_key]
@transaction.non_atomic_requests
def import_action(self, request, *args, **kwargs):
"""
"""
def has_nonunique_violation(modelform):
"""
Collect all error codes except the unique contraint violation.
"""
return list(filterfalse(lambda e: e.code in ['unique', 'unique_together'],
[e for e in chain.from_iterable([errorlist.as_data() for errorlist in modelform.errors.values()])]
))
def get_unique_constraint_violation_fields(modelform) -> tuple[str]:
error = next(filter(lambda e: e.code in ['unique', 'unique_together'],
[e for e in chain.from_iterable([errorlist.as_data() for errorlist in modelform.errors.values()])]
))
return error.params['unique_check']
def exclude_duplication(rows, imported, modelform):
unique_fields = self.get_unique_check_fields()
if not unique_fields:
return imported
return reduce(lambda p, n: p if self.is_first_comer_priority else n, [
row for row in rows if not any(
[DeepDiff(getattr(row, f), getattr(imported, f), ignore_order=True) for f in unique_fields]
)
], imported
)
def read_record(request, new_rows: list, update_rows: list, errors: list, record: dict):
def add_error(errors: list, modelform):
if len(errors) < self.max_error_rows:
errors.append(modelform)
# Create an instance of the ModelForm class using one record of the csv data
modelform = modelform_class(self.get_csv_excluded_fields_init_values(request) | record)
if modelform.is_valid():
# newly imported data
row = self.model(**(self.get_csv_excluded_fields_init_values(request) | modelform.cleaned_data))
new_rows.append(exclude_duplication(new_rows, row, modelform))
else:
if has_nonunique_violation(modelform):
add_error(errors, modelform)
else:
if not self.is_skip_existing:
row = self.model.objects.get(**{k : record[k] for k in get_unique_constraint_violation_fields(modelform)})
# If we had same record in database, would update it by imported data
for k, v in modelform.cleaned_data.items():
setattr(row, k, v)
self.update_csv_excluded_fields(request, row)
update_rows.append(exclude_duplication(update_rows, row, modelform))
def disable_formfield(db_field, **kwargs):
form_field = db_field.formfield(**kwargs)
if form_field:
form_field.widget.attrs['disabled'] = 'true'
return form_field
if not self.has_import_permission(request):
raise PermissionDenied
opts = self.model._meta
title = _('Import %(name)s') % {'name': opts.verbose_name}
context = {
**self.admin_site.each_context(request),
'title': title,
'app_list': self.admin_site.get_app_list(request),
'opts': opts,
'has_view_permission': self.has_view_permission(request),
}
if request.method == "GET":
form = ImportForm()
if request.method == "POST":
form = ImportForm(request.POST, request.FILES)
if form.is_valid():
import_file = form.cleaned_data['import_file']
# I have to save the file to file system before read into pandas, otherwise the encoding is ignored by pandas.
fs = FileSystemStorage('temp')
file_name = fs.save(import_file.name, import_file)
file_path = path.join(fs.location, file_name)
logging.info(f'Importing {opts.model_name} from {file_path}.')
model_field_names = [f.name for f in self.get_csv_import_fields()]
# Dynamically generate ModelForm class
# modelform_class = globals()[opts.object_name + 'Form']
modelform_class = modelform_factory(self.model, fields = model_field_names, formfield_callback = disable_formfield)
read_csv_params = {'encoding' : self.import_encoding,
'chunksize' : self.chunk_size,
'na_filter' : False,
'dtype' : 'str'
}
errors = []
try:
                    for chunk in pd.read_csv(file_path, **read_csv_params):
# coding=utf-8
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "23/09/18"
import logging
import os
import json
import sys
import pandas as pd
import numpy as np
import random
import math
import itertools
import scipy.stats
from sklearn import linear_model
from math import exp, sqrt
import ai4materials.utils.unit_conversion as uc
logger = logging.getLogger('ai4materials')
def choose_atomic_features(selected_feature_list=None,
atomic_data_file=None, binary_data_file=None):
"""Choose primary features for the extended lasso procedure."""
df1 = pd.read_csv(atomic_data_file, index_col=False)
df2 = pd.read_csv(binary_data_file, index_col=False)
# merge two dataframes on Material
df = pd.merge(df1, df2, on='Mat')
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
radii_s_p = ['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
e_val_z = ['Es(A)', 'val(A)']
df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Es(B)', 'val(B)']
df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(A)', 'val(A)']
df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(B)', 'val(B)']
df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
column_list = df.columns.tolist()
feature_list = column_list
if 'Mat' in feature_list:
feature_list.remove('Mat')
if 'Edim' in feature_list:
feature_list.remove('Edim')
logger.debug("Available features: \n {}".format(feature_list))
df_selected = df[selected_feature_list]
df_selected.insert(0, 'Mat', df['Mat'])
if selected_feature_list:
logger.info("Primary features selected: \n {}".format(selected_feature_list))
else:
logger.error("No selected features.")
sys.exit(1)
return df_selected
def classify_rs_zb(structure):
"""Classify if a structure is rocksalt of zincblend from a list of NoMaD structure.
(one json file). Supports multiple frames (TO DO: check that). Hard-coded.
rocksalt:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.5 0.5 0.5
zincblende:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.25 0.25 0.25
zincblende --> label=0
rocksalt --> label=1
"""
energy = {}
chemical_formula = {}
label = {}
# gIndexRun=0
# gIndexDesc=1
    for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[(gIndexRun, gIndexDesc)]
# energy=1.0
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[(gIndexRun, gIndexDesc)]
# get labels, works only for RS/ZB dataset
pos_atom_2 = np.asarray(list(structure.scaled_positions.values())).reshape(2, 3)[1, :]
if all(i < 0.375 for i in pos_atom_2):
# label='zincblend'
label[gIndexRun, gIndexDesc] = 0
else:
# label='rocksalt'
label[gIndexRun, gIndexDesc] = 1
break
return chemical_formula, energy, label
def get_energy_diff(chemical_formula_list, energy_list, label_list):
""" Obtain difference in energy (eV) between rocksalt and zincblend structures of a given binary.
From a list of chemical formulas, energies and labels returns a dictionary
with {`material`: `delta_e`} where `delta_e` is the difference between the energy
with label 1 and energy with label 0, grouped by material.
Each element of such list corresponds to a json file.
The `delta_e` is exactly what reported in the PRL 114, 105503(2015).
.. todo:: Check if it works for multiple frames.
"""
energy_ = []
chemical_formula_ = []
label_ = []
# energy and chemical formula are lists even if only one frame is present
for i, energy_i in enumerate(energy_list):
energy_.append(energy_i.values())
for i, chemical_formula_i in enumerate(chemical_formula_list):
chemical_formula_.append(chemical_formula_i.values())
for i, label_i in enumerate(label_list):
label_.append(label_i.values())
# flatten the lists
energy = list(itertools.chain(*energy_))
chemical_formula = list(itertools.chain(*chemical_formula_))
label = list(itertools.chain(*label_))
df = pd.DataFrame()
df['Mat'] = chemical_formula
df['Energy'] = energy
df['Label'] = label
# generate summary dataframe with lowest zincblend and rocksalt energy
# zincblend --> label=0
# rocksalt --> label=1
df_summary = df.sort_values(by='Energy').groupby(['Mat', 'Label'], as_index=False).first()
groupby_mat = df_summary.groupby('Mat')
dict_delta_e = {}
for mat, df in groupby_mat:
# calculate the delta_e (E_RS - E_ZB)
energy_label_1 = df.loc[df['Label'] == 1].Energy.values
energy_label_0 = df.loc[df['Label'] == 0].Energy.values
# if energy_diff>0 --> rs
# if energy_diff<0 --> zb
if (energy_label_0 and energy_label_1):
# single element numpy array --> convert to scalar
energy_diff = (energy_label_1 - energy_label_0).item(0)
# divide by 2 because it is the energy_diff for each atom
energy_diff = energy_diff / 2.0
else:
logger.error(
"Could not find all the energies needed to calculate required property for material '{0}'".format(mat))
sys.exit(1)
dict_delta_e.update({mat: (energy_diff, energy_label_0, energy_label_1)})
return dict_delta_e
def get_lowest_energy_structures(structure, dict_delta_e):
"""Get lowest energy structure for each material and label type.
Works only with two possible labels for a given material.
.. todo:: Check if it works for multiple frames.
"""
energy = {}
chemical_formula = {}
is_lowest_energy = {}
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[gIndexRun, gIndexDesc]
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[gIndexRun, gIndexDesc]
lowest_energy_label_0 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[1]
lowest_energy_label_1 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[2]
if lowest_energy_label_0 > lowest_energy_label_1:
lowest_energy_label_01 = lowest_energy_label_1
else:
lowest_energy_label_01 = lowest_energy_label_0
if energy[gIndexRun, gIndexDesc] == lowest_energy_label_01:
is_lowest_energy[gIndexRun, gIndexDesc] = True
else:
is_lowest_energy[gIndexRun, gIndexDesc] = False
return is_lowest_energy
def write_atomic_features(structure, selected_feature_list, df, dict_delta_e=None,
path=None, filename_suffix='.json', json_file=None):
"""Given the chemical composition, build the descriptor made of atomic features only.
Includes all the frames in the same json file.
.. todo:: Check if it works for multiple frames.
"""
# make dictionary {primary_feature: value} for each structure
# dictionary of a dictionary, key: Mat, value: atomic_features
dict_features = df.set_index('chemical_formula').T.to_dict()
# label=0: rocksalt, label=1: zincblend
#chemical_formula_, energy_, label_ = classify_rs_zb(structure)
#is_lowest_energy_ = get_lowest_energy_structures(structure, dict_delta_e)
if structure.isPeriodic == True:
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
# filename is the normalized absolute path
filename = os.path.abspath(os.path.normpath(os.path.join(path,
'{0}{1}'.format(structure.name, filename_suffix))))
                outF = open(filename, 'w')
outF.write("""
{
"data":[""")
cell = structure.atoms[gIndexRun, gIndexDesc].get_cell()
cell = np.transpose(cell)
atoms = structure.atoms[gIndexRun, gIndexDesc]
chemical_formula = structure.chemical_formula_[gIndexRun, gIndexDesc]
energy = structure.energy_eV[gIndexRun, gIndexDesc]
label = label_[gIndexRun, gIndexDesc]
#target = dict_delta_e.get(chemical_formula_[gIndexRun, gIndexDesc])[0]
target = dict_delta_e.get(chemical_formula)
atomic_features = dict_features[structure.chemical_formula[gIndexRun, gIndexDesc]]
#is_lowest_energy = is_lowest_energy_[gIndexRun,gIndexDesc]
res = {
"checksum": structure.name,
"label": label,
"energy": energy,
#"is_lowest_energy": is_lowest_energy,
"delta_e_rs_zb": target,
"chemical_formula": chemical_formula,
"gIndexRun": gIndexRun,
"gIndexDesc": gIndexDesc,
"cell": cell.tolist(),
"particle_atom_number": map(lambda x: x.number, atoms),
"particle_position": map(lambda x: [x.x, x.y, x.z], atoms),
"atomic_features": atomic_features,
"main_json_file_name": json_file,
}
json.dump(res, outF, indent=2)
outF.write("""
] }""")
outF.flush()
return filename
def r_sigma(row):
"""Calculates r_sigma.
John-Bloch's indicator1: |rp(A) + rs(A) - rp(B) -rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
Input rp(A), rs(A), rp(B), rs(B)
They need to be given in this order.
"""
return abs(row[0] + row[1] - row[2] + row[3])
def r_pi(row):
"""Calculates r_pi.
John-Bloch's indicator2: |rp(A) - rs(A)| +| rp(B) -rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
Input rp(A), rs(A), rp(B), rs(B)
They need to be given in this order.
combine_features
"""
return abs(row[0] - row[1]) + abs(row[2] - row[3])
def e_sqrt_z(row):
"""Calculates e/sqrt(val_Z).
Es/sqrt(Zval) and Ep/sqrt(Zval) from Phys. Rev. B 85, 104104 (2012).
Input Es(A) or Ep(A), val(A) (A-->B)
They need to be given in this order.
"""
return row[0] / math.sqrt(row[1])
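# Worked example (illustrative radii only) of applying the row-wise helpers above with
# DataFrame.apply, mirroring their use in choose_atomic_features()/combine_features():
#
#   demo = pd.DataFrame({'rp(A)': [0.62], 'rs(A)': [0.98], 'rp(B)': [0.38], 'rs(B)': [0.61]})
#   demo['r_sigma'] = demo[['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']].apply(r_sigma, axis=1)
#   demo['r_pi'] = demo[['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']].apply(r_pi, axis=1)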
def _get_scaling_factors(columns, metadata_info, energy_unit, length_unit):
"""Calculates characteristic energy and length, given an atomic metadata"""
scaling_factor = []
if columns is not None:
for col in columns:
try:
col_unit = metadata_info[col.split('(', 1)[0]]['units']
# check allowed values, to avoid problem with substance - NOT IDEAD
if col_unit == 'J':
scaling_factor.append(uc.convert_unit(1, energy_unit, target_unit='eV'))
# divide all column by e_0
#df.loc[:, col] *= e_0
elif col_unit == 'm':
scaling_factor.append(uc.convert_unit(1, length_unit, target_unit='angstrom'))
# divide all column by e_0
#df.loc[:, col] *= d_0
else:
scaling_factor.append(1.0)
logger.debug("Feature units are not energy nor lengths. "
"No scale to characteristic length.")
except BaseException:
scaling_factor.append(1.0)
logger.debug("Feature units not included in metadata")
return scaling_factor
def _my_power_2(row):
return pow(row[0], 2)
def _my_power_3(row):
return pow(row[0], 3)
def _my_power_m1(row):
return pow(row[0], -1)
def _my_power_m2(row):
return pow(row[0], -2)
def _my_power_m3(row):
return pow(row[0], -3)
def _my_abs_sqrt(row):
    return math.sqrt(abs(row[0]))
def _my_exp(row):
return exp(row[0])
def _my_exp_power_2(row):
return exp(pow(row[0], 2))
def _my_exp_power_3(row):
return exp(pow(row[0], 3))
def _my_sum(row):
return row[0] + row[1]
def _my_abs_sum(row):
return abs(row[0] + row[1])
def _my_abs_diff(row):
return abs(row[0] - row[1])
def _my_diff(row):
return row[0] - row[1]
def _my_div(row):
return row[0] / row[1]
def _my_sum_power_2(row):
return pow((row[0] + row[1]), 2)
def _my_sum_power_3(row):
return pow((row[0] + row[1]), 3)
def _my_sum_exp(row):
return exp(row[0] + row[1])
def _my_sum_exp_power_2(row):
return exp(pow(row[0] + row[1], 2))
def _my_sum_exp_power_3(row):
return exp(pow(row[0] + row[1], 3))
def combine_features(df=None, energy_unit=None, length_unit=None,
metadata_info=None, allowed_operations=None, derived_features=None):
"""Generate combination of features given a dataframe and a list of allowed operations.
For the exponentials, we introduce a characteristic energy/length
converting the
..todo:: Fix under/overflow errors, and introduce handling of exceptions.
"""
if allowed_operations:
logger.info('Selected operations:\n {0}'.format(allowed_operations))
else:
logger.warning('No allowed operations selected.')
# make derived features
if derived_features is not None:
if 'r_sigma' in derived_features:
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
logger.info('Including rs and rp to allow r_sigma calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
if 'r_pi' in derived_features:
logger.info('Including rs and rp to allow r_pi calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
# e_val_z = ['Es(A)', 'val(A)']
# df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Es(B)', 'val(B)']
# df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
#
# e_val_z = ['Ep(A)', 'val(A)']
# df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Ep(B)', 'val(B)']
# df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
columns_ = df.columns.tolist()
# define subclasses of features (see Phys. Rev. Lett. 114, 105503(2015) Supp. info. pag.1)
# make a dictionary {feature: subgroup}
# features belonging to a0 will not be combined, just added at the end
# dict_features = {
# u'val(B)': 'a0', u'val(A)': 'a0',
#
# u'period__el0':'a0',
# u'period__el1':'a0',
# u'atomic_number__el0': 'a0',
# u'atomic_number__el1': 'a0',
# u'group__el0': 'a0',
# u'group__el1': 'a0',
#
# u'atomic_ionization_potential__el0': 'a1',
# u'atomic_ionization_potential__el1': 'a1',
# u'atomic_electron_affinity__el0': 'a1',
# u'atomic_electron_affinity__el1': 'a1',
# u'atomic_homo_lumo_diff__el0': 'a1',
# u'atomic_homo_lumo_diff__el1': 'a1',
# u'atomic_electronic_binding_energy_el0': 'a1',
# u'atomic_electronic_binding_energy_el1': 'a1',
#
#
# u'HOMO(A)': 'a2', u'LUMO(A)': 'a2', u'HOMO(B)': 'a2', u'LUMO(B)': 'a2',
# u'HL_gap_AB': 'a2',
# u'Ebinding_AB': 'a2',
#
# u'atomic_rs_max__el0': 'a3',
# u'atomic_rs_max__el1': 'a3',
# u'atomic_rp_max__el0': 'a3',
# u'atomic_rp_max__el1': 'a3',
# u'atomic_rd_max__el0': 'a3',
# u'atomic_rd_max__el1': 'a3',
# u'atomic_r_by_2_dimer__el0': 'a3',
# u'atomic_r_by_2_dimer__el1': 'a3',
#
# u'd_AB': 'a3',
# u'r_sigma': 'a3', u'r_pi': 'a3',
#
# u'Eh': 'a4', u'C': 'a4'
# }
dict_features = {
u'period': 'a0',
u'atomic_number': 'a0',
u'group': 'a0',
u'atomic_ionization_potential': 'a1',
u'atomic_electron_affinity': 'a1',
u'atomic_homo_lumo_diff': 'a1',
u'atomic_electronic_binding_energy': 'a1',
u'atomic_homo': 'a2', u'atomic_lumo': 'a2',
u'atomic_rs_max': 'a3',
u'atomic_rp_max': 'a3',
u'atomic_rd_max': 'a3',
u'atomic_r_by_2_dimer': 'a3',
u'r_sigma': 'a3', u'r_pi': 'a3'
}
# standardize the data -
# we cannot reproduce the PRL if we standardize the data
#df_a0 = (df_a0 - df_a0.mean()) / (df_a0.max() - df_a0.min())
#df_a1 = (df_a1 - df_a1.mean()) / (df_a1.max() - df_a1.min())
#df_a2 = (df_a2 - df_a2.mean()) / (df_a2.max() - df_a2.min())
#df_a3 = (df_a3 - df_a3.mean()) / (df_a3.max() - df_a3.min())
#df_a4 = (df_a4 - df_a4.mean()) / (df_a4.max() - df_a4.min())
# df_a0 = df[[col for col in columns_ if dict_features.get(col)=='a0']].astype('float32')
df_a0 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a0']].astype('float32')
df_a1 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a1']].astype('float32')
df_a2 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a2']].astype('float32')
df_a3 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a3']].astype('float32')
df_a4 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a4']].astype('float32')
col_a0 = df_a0.columns.tolist()
col_a1 = df_a1.columns.tolist()
col_a2 = df_a2.columns.tolist()
col_a3 = df_a3.columns.tolist()
col_a4 = df_a4.columns.tolist()
# this list will at the end all the dataframes created
df_list = []
df_b0_list = []
df_b1_list = []
df_b2_list = []
df_b3_list = []
df_c3_list = []
df_d3_list = []
df_e3_list = []
df_f1_list = []
df_f2_list = []
df_f3_list = []
df_x1_list = []
df_x2_list = []
df_x_list = []
# create b0: absolute differences and sums of a0
# this is not in the PRL.
for subset in itertools.combinations(col_a0, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = ['(' + subset[1] + '-' + subset[0] + ')']
data = df_a0[list(subset)].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '/' in allowed_operations:
cols = [subset[0] + '/' + subset[1]]
data = df_a0[list(subset)].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = [subset[1] + '/' + subset[0]]
data = df_a0[list(subset)].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a0, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a0[list(subset)].apply(_my_power_2, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a0[list(subset)].apply(_my_power_3, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + ')']
data = df_a0[list(subset)].apply(_my_exp, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# create b1: absolute differences and sums of a1
for subset in itertools.combinations(col_a1, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
# create b2: absolute differences and sums of a2
for subset in itertools.combinations(col_a2, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
# create b3: absolute differences and sums of a3
for subset in itertools.combinations(col_a3, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
# create c3: two steps:
# 1) squares of a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a3[list(subset)].apply(_my_power_2, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a3[list(subset)].apply(_my_power_3, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
# 2) squares of b3 (only sums) --> sum squared of a3
for subset in itertools.combinations(col_a3, 2):
if '^2' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')^2']
data = df_a3[list(subset)].apply(_my_sum_power_2, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')^3']
data = df_a3[list(subset)].apply(_my_sum_power_3, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
# create d3: two steps:
# 1) exponentials of a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + ')']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp, axis=1)
df_d3_list.append(pd.DataFrame(data, columns=cols))
# 2) exponentials of b3 (only sums) --> exponential of sum of a3
for subset in itertools.combinations(col_a3, 2):
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + '+' + subset[1] + ')']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp, axis=1)
df_d3_list.append(pd.DataFrame(data, columns=cols))
# create e3: two steps:
# 1) exponentials of squared a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
operations = {'exp', '^2'}
if operations <= set(allowed_operations):
cols = ['exp(' + subset[0] + '^2)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp_power_2, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
operations = {'exp', '^3'}
if operations <= set(allowed_operations):
try:
cols = ['exp(' + subset[0] + '^3)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp_power_3, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
except OverflowError as e:
logger.warning('Dropping feature combination that caused under/overflow.\n')
# 2) exponentials of b3 (only sums) --> exponential of sum of a3
for subset in itertools.combinations(col_a3, 2):
operations = {'exp', '^2'}
if operations <= set(allowed_operations):
cols = ['exp((' + subset[0] + '+' + subset[1] + ')^2)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp_power_2, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
operations = {'exp', '^3'}
if operations <= set(allowed_operations):
try:
cols = ['exp((' + subset[0] + '+' + subset[1] + ')^3)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp_power_3, axis=1)
                df_e3_list.append(pd.DataFrame(data, columns=cols))
import pandas as pd
import numpy as np
from datetime import datetime
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# from users import build_user_matrix
class Recommending:
def __init__(self):
'''Initializes the TFIDF Vectorizer Object'''
self.tfidf = TfidfVectorizer(stop_words='english', max_features=100)
self.results = {}
def fit_transform(self, X, df):
'''Fits and transforms TFIDF and fits KMeans.
Params:
X (array): Array of the descriptions of houses
df (DataFrame): dataframe of numerical values to add
Returns:
tfidf_matrix (array): matrix with words, price, and beds as
features
'''
self.tfidf.fit(X)
desc_tfidf = self.tfidf.transform(X)
tfidf_matrix = pd.concat([pd.DataFrame(desc_tfidf.todense()),
df['PRICE'] / 2000, df['BEDS'],
df['YEAR BUILT'] / 100], axis=1)
return tfidf_matrix
def cosine_sim(self, tfidf, df):
''' Creates a dictionary of the houses to consider based on cosine
similarity.
Params:
tfidf (Tfidf object): object created in fit method
df (DataFrame): dataframe for reference to address
'''
tfidf.fillna(0, inplace=True)
cosine_similarities = cosine_similarity(tfidf,tfidf)
for idx, row in df.iterrows():
if idx < len(df):
similar_indices = cosine_similarities[idx].argsort()[:-5:-1]
similar_items = [(cosine_similarities[idx][i], i) for i in similar_indices]
self.results[row['ID']] = similar_items[1:]
def recommend(self, id):
''' Prints the recommendations for that house.
Params:
id (int): id of house that needs recommendations
num (int): num of recommendations
Returns:
final_recs (list[tuples]): each item in the list is a
recommendation - each tuple is the score and the house id for
each recommendation
'''
recs = self.results[id]
final_recs = []
for rec in recs:
if rec[0] > 0:
final_recs.append(rec)
return final_recs
def item(self, id):
''' Helper method for returning item in dataframe when looking for
recommendations.
Params:
id (int): id of recommended house
Returns:
address (string): address of house that needs recommendations
'''
return df.loc[df.index == id]['ADDRESS'].tolist()[0]
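# Usage sketch (illustrative only; assumes `df` holds the columns referenced above, i.e. a
# text column of listing descriptions plus 'PRICE', 'BEDS', 'YEAR BUILT', 'ID'; the
# 'DESCRIPTION' column name below is a hypothetical placeholder):
#
#   rec = Recommending()
#   matrix = rec.fit_transform(df['DESCRIPTION'], df)
#   rec.cosine_sim(matrix, df)
#   suggestions = rec.recommend(df['ID'].iloc[0])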
def get_data(file, fave_file=None):
'''Takes in a filename and returns it as a dataframe.
Params:
file (csv): file in csv format
Returns:
df (DataFrame): pandas dataframe of data from file
'''
df = pd.read_csv(file)
df['FAVORITE'] = 'N'
if fave_file != None:
        df_faves = pd.read_csv(fave_file)
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
<NAME>
:Date: 2018. 7. 18
"""
import os
import platform
import sys
from copy import deepcopy as dc
from datetime import datetime
from warnings import warn
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas.core.common as com
import statsmodels.api as sm
from matplotlib import font_manager, rc
from pandas import DataFrame
from pandas import Series
from pandas.core.index import MultiIndex
from pandas.core.indexing import convert_to_index_sliceable
from performanceanalytics.charts.performance_summary import create_performance_summary
from .columns import *
from .outcomes import *
from ..io.downloader import download_latest_data
from ..util.checker import not_empty
import dropbox
import io
# Hangul font setting
# noinspection PyProtectedMember
font_manager._rebuild()
if platform.system() == 'Windows':
font_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf').get_name()
elif platform.system() == 'Darwin': # OS X
font_name = font_manager.FontProperties(fname='/Library/Fonts/AppleGothic.ttf').get_name()
else: # Linux
fname = '/usr/share/fonts/truetype/nanum/NanumGothicOTF.ttf'
if not os.path.isfile(fname):
raise ResourceWarning("Please install NanumGothicOTF.ttf for plotting Hangul.")
font_name = font_manager.FontProperties(fname=fname).get_name()
rc('font', family=font_name)
# for fix broken Minus sign
matplotlib.rcParams['axes.unicode_minus'] = False
PERCENTAGE = 'percentage'
WEIGHT = 'weight'
WEIGHT_SUM = 'weight_sum'
START_DATE = datetime(year=2001, month=5, day=31)
QUANTILE = 'quantile'
RANK = 'rank'
RANK_CORRELATION = 'Rank correlation'
class Portfolio(DataFrame):
"""
"""
_benchmark = KOSPI
benchmarks = None
factors = None
@property
def _constructor(self):
return Portfolio
@not_empty
def __init__(self, data=None, index=None, columns=None, dtype=None, copy: bool = False,
start_date: datetime = START_DATE, end_date: datetime = None,
include_holding: bool = False, include_finance: bool = False,
include_managed: bool = False, include_suspended: bool = False):
if not end_date:
end_date = datetime.today()
if data is None:
print('Data is being downloaded from KSIF DROPBOX DATA STORAGE')
dbx = dropbox.Dropbox(
oauth2_access_token='<KEY>', timeout=None)
metadata, f = dbx.files_download('/preprocessed/final_msf.csv')
# metadata, f = dbx.files_download('/preprocessed/merged.csv')
binary_file = f.content
data = pd.read_csv(io.BytesIO(binary_file))
#
_, self.benchmarks, self.factors = download_latest_data(download_company_data=False)
#
# if not include_holding:
# data = data.loc[~data[HOLDING], :]
#
# if not include_finance:
            # data = data.loc[data[FN_GUIDE_SECTOR] != '금융', :]
#
# if not include_managed:
# data = data.loc[~data[IS_MANAGED], :]
#
# if not include_suspended:
# data = data.loc[~data[IS_SUSPENDED], :]
#
# data = data.loc[(start_date <= data[DATE]) & (data[DATE] <= end_date), :]
else:
_, self.benchmarks, self.factors = download_latest_data(download_company_data=False)
self.benchmarks = self.benchmarks.loc[
(start_date <= self.benchmarks[DATE]) & (self.benchmarks[DATE] <= end_date), :]
self.factors = self.factors.loc[(start_date <= self.factors.index) & (self.factors.index <= end_date), :]
super(Portfolio, self).__init__(data=data) #, index=index, columns=columns, dtype=dtype, copy=copy)
# self.data = data
def __getitem__(self, key):
from pandas.core.dtypes.common import is_list_like, is_integer, is_iterator
key = com.apply_if_callable(key, self)
# shortcut if the key is in columns
try:
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
except (TypeError, ValueError):
# The TypeError correctly catches non hashable "key" (e.g. list)
# The ValueError can be removed once GH #21729 is fixed
pass
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self._getitem_frame(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
# noinspection PyProtectedMember
indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True)
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
data = data[key]
return data
@property
def benchmark(self):
return self._benchmark
@not_empty
def get_benchmark(self, benchmark: str = None) -> DataFrame:
"""
Return a benchmark of this portfolio period.
:param benchmark: (str) The name of benchmark to use. If benchmark is None, use self.benchmark.
:return selected_benchmark: (DataFrame)
        code                | (str) The name of the benchmark. ex) KOSPI(์ฝ์คํผ), KOSDAQ(์ฝ์ค๋ฅ)...
date | (datetime)
benchmark_return_1 | (float) 1 month return.
benchmark_return_3 | (float) 3 month return.
benchmark_return_6 | (float) 6 month return.
benchmark_return_12 | (float) 12 month return.
"""
if benchmark is not None and benchmark not in BENCHMARKS:
raise ValueError('{} is not registered.'.format(benchmark))
if benchmark:
selected_benchmark = self.benchmarks.loc[(self.benchmarks[CODE] == benchmark) &
(self.benchmarks[DATE] >= min(self[DATE])) &
(self.benchmarks[DATE] <= max(self[DATE])), :]
else:
selected_benchmark = self.benchmarks.loc[(self.benchmarks[CODE] == self._benchmark) &
(self.benchmarks[DATE] >= min(self[DATE])) &
(self.benchmarks[DATE] <= max(self[DATE])), :]
selected_benchmark.set_index(DATE, inplace=True)
selected_benchmark = selected_benchmark.loc[
:, [BENCHMARK_RET_1, BENCHMARK_RET_3, BENCHMARK_RET_6, BENCHMARK_RET_12]
]
return selected_benchmark
def set_benchmark(self, benchmark):
if benchmark not in BENCHMARKS:
raise ValueError('{} is not registered.'.format(benchmark))
else:
self._benchmark = benchmark
# noinspection PyPep8Naming
@property
def SMB(self) -> Series:
return self.factors.loc[:, SMB]
# noinspection PyPep8Naming
@property
def HML(self) -> Series:
return self.factors.loc[:, HML]
@not_empty
def to_dataframe(self, deepcopy: bool = True) -> DataFrame:
"""
Convert portfolio to dataframe type.
:param deepcopy : (bool) If deepcopy is True, convert to dataframe based on deepcopy. Or, convert to dataframe
based on shallow copy.
:return dataframe : (DataFrame) Converted dataframe type portfolio
"""
if deepcopy:
dataframe = DataFrame(dc(self))
else:
dataframe = DataFrame(self)
return dataframe
def outcome(self, benchmark: str = None, weighted: str = None,
long_transaction_cost_ratio: float = 0.0025, short_transaction_cost_ratio: float = 0.0025,
show_plot: bool = False):
"""
Calculate various indices of the portfolio.
:param benchmark: (str) The name of benchmark. If benchmark is None, use a default benchmark.
:param weighted: (str) If weighted is a string, use the string to calculate weighted portfolio.
If there are negative weights, calculate long-short weighted portfolio.
:param long_transaction_cost_ratio: (float) A transaction cost ratio for long investment
:param short_transaction_cost_ratio: (float) A transaction cost ratio for short investment
:param show_plot: (bool) If show_plot is True, show a performance summary graph.
:return result: (dict)
portfolio_return | (float) Total compound return of the portfolio
benchmark_return | (float) Total compound return of the benchmark
active_return | (float) Annualized average excess return
active_risk | (float) Annualized tracking error
sharpe_ratio | (float) Sharpe ratio
information_ratio | (float) Average excess return / tracking error
compound_annual_growth_rate | (float) Annual compound return of the portfolio
maximum_drawdown | (float) The maximum loss from a peak to a trough of a portfolio,
before a new peak is attained
Fama_French_alpha | (float) An abnormal return from Fama-French 3 Factor model
(Fama and French, 1993)
Fama_French_alpha_p_value | (float) A p-value of the the abnormal return
Fama_French_beta | (float) A market beta from Fama-French 3 Factor model
(Fama and French, 1993)
turnover | (float) Annual average turnover
"""
if benchmark is not None and benchmark not in BENCHMARKS:
raise ValueError('{} is not registered.'.format(benchmark))
if benchmark is None:
benchmark = self._benchmark
portfolio, portfolio_returns, turnovers = self.get_returns_and_turnovers(long_transaction_cost_ratio,
short_transaction_cost_ratio, weighted)
turnover = turnovers.mean() * 12
benchmarks = portfolio.get_benchmark(benchmark=benchmark).loc[:, [BENCHMARK_RET_1]]
merged_returns = pd.merge(portfolio_returns, benchmarks, on=DATE)
merged_returns = pd.merge(merged_returns,
portfolio.get_benchmark(CD91).rename(columns={BENCHMARK_RET_1: CD91}).loc[:, [CD91]],
on=DATE)
# Portfolio return, benchmark return
portfolio_return = self._calculate_total_return(merged_returns[PORTFOLIO_RETURN])
benchmark_return = self._calculate_total_return(merged_returns[BENCHMARK_RET_1])
# CAGR
period_len = len(portfolio[DATE].unique())
compound_annual_growth_rate = (portfolio_return + 1) ** (12 / period_len) - 1
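        # period_len counts the monthly observations in the portfolio, so the exponent
        # 12 / period_len annualizes the total compound return.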
# Active return, active risk, information ratio
benchmark_excess_returns = merged_returns[PORTFOLIO_RETURN] - merged_returns[BENCHMARK_RET_1]
average_excess_return = np.average(benchmark_excess_returns)
tracking_error = np.std(benchmark_excess_returns)
active_return = average_excess_return * 12
active_risk = tracking_error * np.sqrt(12)
information_ratio = average_excess_return / tracking_error
# Sharpe ratio
risk_free_excess_returns = merged_returns[PORTFOLIO_RETURN] - merged_returns[CD91]
sharpe_ratio = np.average(risk_free_excess_returns) / np.std(risk_free_excess_returns)
# Maximum drawdown
portfolio_cumulative_assets = merged_returns[PORTFOLIO_RETURN].add(1).cumprod()
maximum_drawdown = portfolio_cumulative_assets.div(portfolio_cumulative_assets.cummax()).sub(1).min()
# Fama-French, 1993
market_excess_returns = merged_returns[BENCHMARK_RET_1] - merged_returns[CD91]
risk_free_excess_return = 'risk_free_excess_return'
market_excess_return = 'market_excess_return'
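        # Fama-French (1993) regression: (r_p - r_f) = alpha + beta * (r_m - r_f) + s * SMB + h * HML + e;
        # alpha and the market beta are read off params[0] and params[1] of the fitted model below.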
ff_data = pd.concat([
DataFrame(risk_free_excess_returns, columns=[risk_free_excess_return]),
DataFrame(market_excess_returns, columns=[market_excess_return]),
self.factors
], axis=1, join='inner').dropna()
model = sm.OLS(
ff_data.loc[:, risk_free_excess_return],
sm.add_constant(ff_data.loc[:, [market_excess_return, SMB, HML]])
).fit()
fama_french_alpha = model.params[0]
fama_french_alpha_p_value = model.pvalues[0]
fama_french_beta = model.params[1]
result = {
PORTFOLIO_RETURN: portfolio_return,
BENCHMARK_RETURN: benchmark_return,
ACTIVE_RETURN: active_return,
ACTIVE_RISK: active_risk,
SR: sharpe_ratio,
IR: information_ratio,
CAGR: compound_annual_growth_rate,
MDD: maximum_drawdown,
FAMA_FRENCH_ALPHA: fama_french_alpha,
FAMA_FRENCH_ALPHA_P_VALUE: fama_french_alpha_p_value,
FAMA_FRENCH_BETA: fama_french_beta,
TURNOVER: turnover,
}
if show_plot:
plotting_returns = dc(merged_returns).loc[:, [PORTFOLIO_RETURN, BENCHMARK_RET_1]]
plotting_returns.rename(columns={
PORTFOLIO_RETURN: 'Portfolio',
BENCHMARK_RET_1: benchmark
}, inplace=True)
create_performance_summary(plotting_returns, other_cols=range(1, 2))
plt.show()
return result
def get_returns(self, weighted: str = None,
long_transaction_cost_ratio: float = 0.0025,
short_transaction_cost_ratio: float = 0.0025, cumulative=False) -> DataFrame:
_, returns, _ = self.get_returns_and_turnovers(
long_transaction_cost_ratio, short_transaction_cost_ratio, weighted
)
if cumulative:
returns = _cumulate(returns)
return returns
def get_returns_and_turnovers(self, long_transaction_cost_ratio, short_transaction_cost_ratio, weighted):
portfolio = self.dropna(subset=[RET_1])
returns = pd.DataFrame()
if weighted:
if weighted not in self.columns:
raise ValueError('{} is not in Portfolio.columns.'.format(weighted))
portfolio = portfolio.dropna(subset=[weighted])
long_portfolio = portfolio.loc[portfolio[weighted] > 0, :]
short_portfolio = portfolio.loc[portfolio[weighted] < 0, :]
short_portfolio.loc[:, RET_1] = -1 * short_portfolio.loc[:, RET_1]
short_portfolio.loc[:, weighted] = -short_portfolio.loc[:, weighted]
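            # The short leg is folded into the same weighted-average call by flipping the sign of
            # both its returns and its (negative) weights.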
long_returns = long_portfolio.groupby([DATE]).apply(
lambda x: np.average(x[RET_1], weights=x[weighted])
)
short_returns = short_portfolio.groupby([DATE]).apply(
lambda x: np.average(x[RET_1], weights=x[weighted])
)
long_turnovers = _get_turnovers(long_portfolio, weighted)
short_turnovers = _get_turnovers(short_portfolio, weighted)
if short_returns.empty:
returns[PORTFOLIO_RETURN] = long_returns.subtract(
long_turnovers.multiply(long_transaction_cost_ratio), fill_value=0)
turnovers = long_turnovers
else:
returns[PORTFOLIO_RETURN] = long_returns.subtract(
long_turnovers.multiply(long_transaction_cost_ratio), fill_value=0
).add(
short_returns.subtract(
short_turnovers.multiply(short_transaction_cost_ratio), fill_value=0)
)
if pd.isna(returns[PORTFOLIO_RETURN]).any():
warn("When calculating long-short portfolio, weighted should have positive and negative values "
"in same periods. Otherwise, the return of the period is not calculated.")
returns.dropna(inplace=True)
turnovers = long_turnovers.add(short_turnovers)
else:
turnovers = _get_turnovers(portfolio)
returns[PORTFOLIO_RETURN] = portfolio.groupby([DATE]).apply(
lambda x: np.average(x[RET_1])
        ).subtract(turnovers.multiply(long_transaction_cost_ratio), fill_value=0)
return portfolio, returns, turnovers
@not_empty
def _calculate_total_return(self, grouped_data):
data = grouped_data.dropna()
total_return = _cumulate(data).iloc[-1]
return total_return
@not_empty
def periodic_rank(self, min_rank: int = 1, max_rank: int = sys.maxsize, factor: str = MKTCAP,
bottom: bool = False, drop_rank: bool = True):
"""
Select companies which have a rank bigger than or equal to min_rank, and smaller than or equal to max_rank
for each period.
:param min_rank: (int) The minimum rank of selected companies.
The selected_companies includes the minimum ranked company.
:param max_rank: (int) The maximum rank of selected companies.
The selected_companies includes the maximum ranked company.
:param factor: (str) The factor used to determine rank.
:param bottom: (bool) If bottom is True, select the companies from bottom. Or, select the companies from top.
:param drop_rank: (bool) If drop_rank is True, delete rank column from the selected_companies.
:return selected_companies: (DataFrame) Selected companies for each period by rank of the factor.
"""
assert min_rank > 0, "min_rank should be bigger than 0."
assert max_rank > min_rank, "max_rank should be bigger than min_rank."
all_companies = dc(self)
all_companies = all_companies.dropna(subset=[factor])
all_companies[RANK] = all_companies.groupby(by=[DATE])[factor].transform(
lambda x: x.rank(ascending=bottom)
)
selected_companies = all_companies.loc[(all_companies[RANK] >= min_rank) & (all_companies[RANK] <= max_rank), :]
selected_companies = selected_companies.sort_values(by=[DATE, RANK])
if drop_rank:
del selected_companies[RANK]
return selected_companies
@not_empty
def periodic_percentage(self, min_percentage: float, max_percentage: float, factor: str = MKTCAP,
bottom: bool = False):
"""
        Select companies which have a percentage bigger than or equal to min_percentage, and smaller than
        max_percentage for each period.
:param min_percentage: (float) The minimum percentage of selected companies.
The selected_companies includes the minimum percent company.
:param max_percentage: (float) The maximum percentage of selected companies.
The selected_companies does not include the maximum percent company.
:param factor: (str) The factor used to determine rank.
:param bottom: (bool) If bottom is True, select the companies from bottom. Or, select the companies from top.
:return selected_companies: (DataFrame) Selected companies for each period by quantile of the factor.
"""
assert min_percentage >= 0, "min_percentage should be bigger than or equal to 0."
assert max_percentage > min_percentage, "max_percentage should be bigger than min_percentage."
        assert max_percentage <= 1, "max_percentage should be smaller than or equal to 1."
all_companies = dc(self)
all_companies = all_companies.dropna(subset=[factor])
all_companies[PERCENTAGE] = all_companies.groupby(by=[DATE])[factor].transform(
lambda x: x.rank(ascending=bottom, pct=True)
)
selected_companies = all_companies.loc[
(all_companies[PERCENTAGE] >= min_percentage) &
(all_companies[PERCENTAGE] < max_percentage), :]
del selected_companies[PERCENTAGE]
return selected_companies
@not_empty
def periodic_standardize(self, factor: str, prefix: str = 'std_'):
"""
Standardize a factor periodically.
:param factor: (str) The name of factor will be standardized.
:param prefix: (str) The prefix preceding a name of standardized factor.
:return standardized_companies: (DataFrame) Standardized companies for each period by factor.
"""
unstandardized_companies = dc(self.loc[~np.isnan(self[factor]), :])
unstandardized_companies[prefix + factor] = unstandardized_companies.groupby(by=[DATE])[factor].transform(
lambda x: (x - x.mean()) / x.std()
)
standardized_companies = unstandardized_companies
return standardized_companies
@not_empty
def quantile_distribution_ratio(self, factor: str, chunk_num: int = 10, cumulative: bool = True,
weighted: bool = False, only_positive: bool = False, show_plot: bool = False,
show_bar_chart: bool = False, title: str = None) -> DataFrame:
"""
Make quantile portfolios by the given factor, and calculate returns.
:param factor: (str) The name of factor used to make quantile portfolios.
:param chunk_num: (int) The number of portfolios.
:param cumulative: (bool) If cumulative is true, calculate cumulative returns.
:param weighted: (bool) If weighted is true, each portfolio is a weighted portfolio based on MKTCAP
:param only_positive: (bool) If only_positive is true, use only positive value of the factor.
:param show_plot: (bool) If show_plot is true, show a time series line chart of groups.
:param show_bar_chart: (bool) If show_bar_chart is true, show a arithmetic average bar chart of groups.
:param title: (str) If title is not None, set the title.
:return quantile_portfolio_returns: (DataFrame) The returns of each group
--------------------------------------------------------------
date | (datetime)
--------------------------------------------------------------
1 | (float) The return of group 1 portfolio at the date.
2 | (float) The return of group 2 portfolio at the date.
3 | (float) The return of group 3 portfolio at the date.
...
--------------------------------------------------------------
"""
assert chunk_num > 1, "chunk_num should be bigger than 1."
labels = [str(x) for x in range(1, chunk_num + 1)]
portfolio = dc(self)
portfolio = portfolio.dropna(subset=[factor, RET_1])
if only_positive:
portfolio = portfolio.loc[portfolio[factor] > 0, :]
portfolio[QUANTILE] = portfolio.groupby(by=[DATE])[factor].transform(
lambda x: pd.qcut(x, chunk_num, labels=labels, duplicates='drop')
)
portfolio[QUANTILE] = portfolio[QUANTILE].apply(int).apply(str)
quantile_portfolio_returns = DataFrame()
for label in labels:
labelled_data = portfolio.loc[portfolio[QUANTILE] == label, :]
if weighted:
grouped_data = labelled_data.groupby([DATE]).apply(lambda x: np.average(x[RET_1], weights=x[MKTCAP]))
else:
grouped_data = labelled_data.groupby([DATE])[RET_1].mean()
grouped_data = grouped_data.rename(label)
grouped_data = _cumulate(grouped_data, cumulative)
quantile_portfolio_returns = pd.concat([quantile_portfolio_returns, grouped_data], axis=1, sort=True)
if show_plot:
plt.figure()
quantile_portfolio_returns.plot()
if title:
plt.title(title)
else:
plt.title(factor.upper())
plt.ylabel("Return")
plt.xlabel("Date")
plt.legend(loc='upper left')
plt.show()
if show_bar_chart:
plt.figure()
quantile_result = portfolio.quantile_distribution_ratio(
factor, chunk_num=chunk_num, cumulative=False, weighted=weighted, only_positive=only_positive,
show_plot=False, show_bar_chart=False, title=None
)
quantile_result.mean(axis=0).plot(kind='bar')
if title:
plt.title(title)
else:
plt.title(factor.upper())
plt.ylabel("Return")
plt.xlabel("Group")
plt.show()
return quantile_portfolio_returns
def rank_correlation(self, factor: str, ranked_by: str = RET_1, rolling: int = 6,
show_plot=False, title: str = '') -> DataFrame:
portfolio = dc(self.dropna(subset=[ranked_by]))
portfolio = portfolio.periodic_rank(factor=factor, drop_rank=False)
factor_rank = "{factor}_rank".format(factor=factor)
portfolio = portfolio.rename(index=str, columns={"rank": factor_rank})
portfolio = portfolio.periodic_rank(factor=ranked_by, drop_rank=False)
actual_rank = "{ranked_by}_rank".format(ranked_by=ranked_by)
portfolio = portfolio.rename(index=str, columns={"rank": actual_rank})
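        # Spearman rank IC per period: rho = 1 - 6 * sum(d_i ** 2) / (n * (n ** 2 - 1)),
        # where d_i is the difference between the factor rank and the realized-return rank.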
rank_ic = portfolio.groupby(by=[DATE]).apply(
lambda x: 1 - (6 * ((x[factor_rank] - x[actual_rank]) ** 2).sum()) / (len(x) * (len(x) ** 2 - 1)))
rank_ic = pd.DataFrame(rank_ic, columns=[RANK_CORRELATION])
rolling_column_name = 'rolling_{}'.format(rolling)
rank_ic[rolling_column_name] = rank_ic[RANK_CORRELATION].rolling(window=rolling).mean()
if show_plot:
rank_ic.plot()
plt.title(title)
plt.axhline(y=0, color='black')
plt.ylabel('Rank IC')
plt.xlabel('Date')
plt.show()
return rank_ic
def show_plot(self, cumulative: bool = True, weighted: bool = False, title: str = None,
show_benchmark: bool = True, save: bool = False):
portfolio = self.dropna(subset=[RET_1])
if weighted:
grouped_data = portfolio.groupby([DATE]).apply(lambda x: np.average(x[RET_1], weights=x[MKTCAP]))
else:
grouped_data = portfolio.groupby([DATE])[RET_1].mean()
# noinspection PyProtectedMember
grouped_data = _cumulate(grouped_data, cumulative)
plt.figure()
if show_benchmark:
benchmark = self.get_benchmark().loc[:, [BENCHMARK_RET_1]]
benchmark = _cumulate(benchmark, cumulative).dropna().reset_index(drop=False)
grouped_data = grouped_data.reset_index(drop=False)
grouped_data = | pd.merge(grouped_data, benchmark, on=[DATE]) | pandas.merge |
#!/usr/bin/env python3
import os
import sys
import pandas as pd
from json import load
infolder = sys.argv[1]
cwd = os.getcwd()
os.chdir(infolder)
with open('ica_decomposition.json', 'r') as f:
comp = load(f)
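# 'Method' is run-level metadata in ica_decomposition.json, not a component entry.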
del(comp['Method'])
# Prepare list of components for projections
acc = ''
rej = ''
ign = ''
for n, ic in enumerate(comp):
if comp[ic]['classification'] == 'rejected':
print(f'{n} rej')
rej += f'{n},'
if comp[ic]['classification'] == 'accepted':
print(f'{n} acc')
acc += f'{n},'
if comp[ic]['classification'] == 'ignored':
        print(f'{n} ign')
ign += f'{n},'
with open('rejected_list.1D', 'w+') as f:
f.write(rej[:-1])
with open('accepted_list.1D', 'w+') as f:
f.write(acc[:-1])
with open('ignored_list.1D', 'w+') as f:
f.write(ign[:-1])
# Prepare same list for 4D denoise
comp_data = []
for ic in comp:
comp_data.append([comp[ic]['normalized variance explained'],
comp[ic]['classification']])
dt = | pd.DataFrame(comp_data, columns=['var', 'class']) | pandas.DataFrame |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, "4", 4, 5])
result = s.replace([2, "4"], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"categorical, numeric",
[
(pd.Categorical(["A"], categories=["A", "B"]), [1]),
(pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
],
)
def test_replace_categorical(self, categorical, numeric):
# GH 24971, GH#23305
ser = pd.Series(categorical)
result = ser.replace({"A": 1, "B": 2})
expected = pd.Series(numeric).astype("category")
if 2 not in expected.cat.categories:
# i.e. categories should be [1, 2] even if there are no "B"s present
# GH#44940
expected = expected.cat.add_categories(2)
tm.assert_series_equal(expected, result)
def test_replace_categorical_single(self):
# GH 26988
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
s = pd.Series(dti)
c = s.astype("category")
expected = c.copy()
expected = expected.cat.add_categories("foo")
expected[2] = "foo"
expected = expected.cat.remove_unused_categories()
assert c[2] != "foo"
result = c.replace(c[2], "foo")
tm.assert_series_equal(expected, result)
assert c[2] != "foo" # ensure non-inplace call does not alter original
return_value = c.replace(c[2], "foo", inplace=True)
assert return_value is None
tm.assert_series_equal(expected, c)
first_value = c[0]
return_value = c.replace(c[1], c[0], inplace=True)
assert return_value is None
assert c[0] == c[1] == first_value # test replacing with existing value
def test_replace_with_no_overflowerror(self):
# GH 25616
# casts to object without Exception from OverflowError
s = pd.Series([0, 1, 2, 3, 4])
result = s.replace([3], ["100000000000000000000"])
expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
tm.assert_series_equal(result, expected)
s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
result = s.replace(["100000000000000000000"], [1])
expected = pd.Series([0, 1, "100000000000000000001"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, to_replace, exp",
[
([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
(["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
],
)
def test_replace_commutative(self, ser, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
series = pd.Series(ser)
expected = pd.Series(exp)
result = series.replace(to_replace)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
)
def test_replace_no_cast(self, ser, exp):
# GH 9113
# BUG: replace int64 dtype with bool coerces to int64
series = pd.Series(ser)
result = series.replace(2, True)
expected = pd.Series(exp)
tm.assert_series_equal(result, expected)
def test_replace_invalid_to_replace(self):
# GH 18634
# API: replace() should raise an exception if invalid argument is given
series = pd.Series(["a", "b", "c "])
msg = (
r"Expecting 'to_replace' to be either a scalar, array-like, "
r"dict or None, got invalid type.*"
)
with pytest.raises(TypeError, match=msg):
series.replace(lambda x: x.strip())
@pytest.mark.parametrize("frame", [False, True])
def test_replace_nonbool_regex(self, frame):
obj = pd.Series(["a", "b", "c "])
if frame:
obj = obj.to_frame()
msg = "'to_replace' must be 'None' if 'regex' is not a bool"
with pytest.raises(ValueError, match=msg):
obj.replace(to_replace=["a"], regex="foo")
@pytest.mark.parametrize("frame", [False, True])
def test_replace_empty_copy(self, frame):
obj = pd.Series([], dtype=np.float64)
if frame:
obj = obj.to_frame()
res = obj.replace(4, 5, inplace=True)
assert res is None
res = obj.replace(4, 5, inplace=False)
tm.assert_equal(res, obj)
assert res is not obj
def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
# GH#33340
ser = pd.Series([1, 2, "A", fixed_now_ts, True])
to_replace = {0: 1, 2: "A"}
value = "foo"
msg = "Series.replace cannot use dict-like to_replace and non-None value"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
to_replace = 1
value = {0: "foo", 2: "bar"}
msg = "Series.replace cannot use dict-value and non-None to_replace"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
def test_replace_extension_other(self, frame_or_series):
# https://github.com/pandas-dev/pandas/issues/34530
obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
result = obj.replace("", "") # no exception
# should not have changed dtype
tm.assert_equal(obj, result)
def _check_replace_with_method(self, ser: pd.Series):
df = ser.to_frame()
res = ser.replace(ser[1], method="pad")
expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
tm.assert_series_equal(res, expected)
res_df = df.replace(ser[1], method="pad")
tm.assert_frame_equal(res_df, expected.to_frame())
ser2 = ser.copy()
res2 = ser2.replace(ser[1], method="pad", inplace=True)
assert res2 is None
tm.assert_series_equal(ser2, expected)
res_df2 = df.replace(ser[1], method="pad", inplace=True)
assert res_df2 is None
tm.assert_frame_equal(df, expected.to_frame())
def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
ser = pd.Series(arr)
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_interval_with_method(self, as_categorical):
# in particular interval that can't hold NA
idx = pd.IntervalIndex.from_breaks(range(4))
ser = pd.Series(idx)
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_period", [True, False])
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_datetimelike_with_method(self, as_period, as_categorical):
idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
if as_period:
idx = idx.tz_localize(None).to_period("D")
ser = pd.Series(idx)
ser.iloc[-2] = pd.NaT
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
def test_replace_with_compiled_regex(self):
# https://github.com/pandas-dev/pandas/issues/35680
s = pd.Series(["a", "b", "c"])
regex = re.compile("^a$")
result = s.replace({regex: "z"}, regex=True)
expected = pd.Series(["z", "b", "c"])
tm.assert_series_equal(result, expected)
def test_pandas_replace_na(self):
# GH#43344
ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string")
regex_mapping = {
"AA": "CC",
"BB": "CC",
"EE": "CC",
"CC": "CC-REPL",
}
result = ser.replace(regex_mapping, regex=True)
exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"dtype, input_data, to_replace, expected_data",
[
("bool", [True, False], {True: False}, [False, False]),
("int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("Int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("Float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("string", ["one", "two"], {"one": "1", "two": "2"}, ["1", "2"]),
(
pd.IntervalDtype("int64"),
IntervalArray([pd.Interval(1, 2), pd.Interval(2, 3)]),
{pd.Interval(1, 2): pd.Interval(10, 20)},
IntervalArray([pd.Interval(10, 20), pd.Interval(2, 3)]),
),
(
| pd.IntervalDtype("float64") | pandas.IntervalDtype |
from opendatatools.common import RestAgent, md5
from progressbar import ProgressBar
import json
import pandas as pd
import io
import hashlib
import time
index_map = {
'Barclay_Hedge_Fund_Index' : 'ghsndx',
'Convertible_Arbitrage_Index' : 'ghsca',
'Distressed_Securities_Index' : 'ghsds',
'Emerging_Markets_Index' : 'ghsem',
'Equity_Long_Bias_Index' : 'ghselb',
'Equity_Long_Short_Index' : 'ghsels',
'Equity_Market_Neutral_Index' : 'ghsemn',
'European_Equities_Index' : 'ghsee',
'Event_Driven_Index' : 'ghsed',
'Fixed_Income_Arbitrage_Index' : 'ghsfia',
'Fund_of_Funds_Index' : 'ghsfof',
'Global_Macro_Index' : 'ghsmc',
'Healthcare_&_Biotechnology_Index': 'ghsbio',
'Merger_Arbitrage_Index' : 'ghsma',
'Multi_Strategy_Index' : 'ghsms',
'Pacific_Rim_Equities_Index' : 'ghspre',
'Technology_Index' : 'ghstec',
}
class SimuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.user_info = None
self.df_fundlist = None
self.cookies = None
def login(self, username, password):
url = 'https://passport.simuwang.com/index.php?m=Passport&c=auth&a=login&type=login&name=%s&pass=%s&reme=1&rn=1' % (username, password)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
            return None, 'Login failed'
jsonobj = json.loads(response)
suc = jsonobj['suc']
msg = jsonobj['msg']
if suc != 1:
return None, msg
self.cookies = self.get_cookies()
self.user_info = jsonobj['data']
return self.user_info, msg
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def _get_rz_token(self, time):
mk = time * 158995555893
mtoken = md5(md5(str(mk))) + '.' + str(time)
return mtoken
def _get_fund_list_page(self, page_no):
url = 'https://dc.simuwang.com/ranking/get?page=%s&condition=fund_type:1,6,4,3,8,2;ret:9;rating_year:1;istiered:0;company_type:1;sort_name:profit_col2;sort_asc:desc;keyword:' % page_no
response = self.do_request(url)
if response is None:
            return None, 'Failed to fetch data', None
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000:
return None, msg, None
df = | pd.DataFrame(jsonobj['data']) | pandas.DataFrame |
import pandas as pd
import pyspark
from flytekitplugins.spark.task import Spark
import flytekit
from flytekit import kwtypes, task, workflow
from flytekit.types.schema import FlyteSchema
try:
from typing import Annotated
except ImportError:
from typing_extensions import Annotated
def test_wf1_with_spark():
@task(task_config=Spark())
def my_spark(a: int) -> (int, str):
session = flytekit.current_context().spark_session
assert session.sparkContext.appName == "FlyteSpark: ex:local:local:local"
return a + 2, "world"
@task
def t2(a: str, b: str) -> str:
return b + a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = my_spark(a=a)
d = t2(a=y, b=b)
return x, d
x = my_wf(a=5, b="hello ")
assert x == (7, "hello world")
def test_spark_dataframe_input():
my_schema = FlyteSchema[kwtypes(name=str, age=int)]
@task
def my_dataset() -> my_schema:
return pd.DataFrame(data={"name": ["Alice"], "age": [5]})
@task(task_config=Spark())
def my_spark(df: pyspark.sql.DataFrame) -> my_schema:
session = flytekit.current_context().spark_session
new_df = session.createDataFrame([("Bob", 10)], my_schema.column_names())
return df.union(new_df)
@workflow
def my_wf() -> my_schema:
df = my_dataset()
return my_spark(df=df)
x = my_wf()
assert x
reader = x.open()
df2 = reader.all()
assert df2 is not None
def test_fs_sd_compatibility():
my_schema = FlyteSchema[kwtypes(name=str, age=int)]
@task
def my_dataset() -> pd.DataFrame:
return pd.DataFrame(data={"name": ["Alice"], "age": [5]})
@task(task_config=Spark())
def my_spark(df: pyspark.sql.DataFrame) -> my_schema:
session = flytekit.current_context().spark_session
new_df = session.createDataFrame([("Bob", 10)], my_schema.column_names())
return df.union(new_df)
@task(task_config=Spark())
def read_spark_df(df: pyspark.sql.DataFrame) -> int:
return df.count()
@workflow
def my_wf() -> int:
df = my_dataset()
fs = my_spark(df=df)
return read_spark_df(df=fs)
res = my_wf()
assert res == 2
def test_spark_dataframe_return():
my_schema = FlyteSchema[kwtypes(name=str, age=int)]
@task(task_config=Spark())
def my_spark(a: int) -> my_schema:
session = flytekit.current_context().spark_session
df = session.createDataFrame([("Alice", a)], my_schema.column_names())
return df
@workflow
def my_wf(a: int) -> my_schema:
return my_spark(a=a)
x = my_wf(a=5)
reader = x.open(pd.DataFrame)
df2 = reader.all()
result_df = df2.reset_index(drop=True) == | pd.DataFrame(data={"name": ["Alice"], "age": [5]}) | pandas.DataFrame |
import os
import pandas as pd
from tqdm import tqdm
import json
import numpy as np
from sklearn.model_selection import train_test_split
def bb_iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
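    # (the "+ 1" terms treat box coordinates as inclusive integer pixel indices)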
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
def averageCoordinates(df, threshold):
tmp_df = df.reset_index()
duplicate = {}
for index1, row1 in tmp_df.iterrows():
if index1 < len(tmp_df) - 1:
next_index = index1 + 1
for index2, row2 in tmp_df.loc[next_index:, :].iterrows():
if row1["class_id"] == row2["class_id"]:
boxA = [row1["x_min"], row1["y_min"], row1["x_max"], row1["y_max"]]
boxB = [row2["x_min"], row2["y_min"], row2["x_max"], row2["y_max"]]
iou = bb_iou(boxA, boxB)
if iou > threshold:
if row1["index"] not in duplicate:
duplicate[row1["index"]] = []
duplicate[row1["index"]].append(row2["index"])
remove_keys = []
for k in duplicate:
for i in duplicate[k]:
if i in duplicate:
for id in duplicate[i]:
if id not in duplicate[k]:
duplicate[k].append(id)
if i not in remove_keys:
remove_keys.append(i)
for i in remove_keys:
del duplicate[i]
rows = []
removed_index = []
for k in duplicate:
row = tmp_df[tmp_df["index"] == k].iloc[0]
X_min = [row["x_min"]]
X_max = [row["x_max"]]
Y_min = [row["y_min"]]
Y_max = [row["y_max"]]
removed_index.append(k)
for i in duplicate[k]:
removed_index.append(i)
row = tmp_df[tmp_df["index"] == i].iloc[0]
X_min.append(row["x_min"])
X_max.append(row["x_max"])
Y_min.append(row["y_min"])
Y_max.append(row["y_max"])
X_min_avg = sum(X_min) / len(X_min)
X_max_avg = sum(X_max) / len(X_max)
Y_min_avg = sum(Y_min) / len(Y_min)
Y_max_avg = sum(Y_max) / len(Y_max)
new_row = [
row["image_id"],
row["class_name"],
row["class_id"],
X_min_avg,
Y_min_avg,
X_max_avg,
Y_max_avg,
row["width"],
row["height"],
]
rows.append(new_row)
for index, row in tmp_df.iterrows():
if row["index"] not in removed_index:
new_row = [
row["image_id"],
row["class_name"],
row["class_id"],
row["x_min"],
row["y_min"],
row["x_max"],
row["y_max"],
row["width"],
row["height"],
]
rows.append(new_row)
new_df = pd.DataFrame(
rows,
columns=[
"image_id",
"class_name",
"class_id",
"x_min",
"y_min",
"x_max",
"y_max",
"width",
"height",
],
)
return new_df
def createImagesTxt(_images, filepath, data_dir):
images_dir = data_dir + "/png_1024l/train/"
rows = []
for img_id in _images:
rows.append(images_dir + img_id + ".png")
f = open(filepath, "w")
f.write("\n".join(rows))
f.close()
if __name__ == "__main__":
server = input("51, 53:")
if server == "51":
DATA_DIR = "/data2/minki/kaggle/vinbigdata-cxr"
elif server == "53":
DATA_DIR = "/data/minki/kaggle/vinbigdata-cxr"
df_train = | pd.read_csv(DATA_DIR + "/train.csv") | pandas.read_csv |
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Invalid comparison between dtype=category and str"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# Modified version for Erie County, New York
# Contact: <EMAIL>
from functools import reduce
from typing import Generator, Tuple, Dict, Any, Optional
import os
import pandas as pd
import streamlit as st
import numpy as np
import matplotlib
from bs4 import BeautifulSoup
import requests
import ipyvuetify as v
from traitlets import Unicode, List
from datetime import date, datetime, timedelta
import time
import altair as alt
from collections import namedtuple
from scipy.integrate import odeint
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Create S3 object to get the ENV variable from Heroku
#secret = os.environ['SECRET_KEY']
# Prompt the user for the secret
#password = st.text_input("Secret Handshake:", value="", type="password")
# If the secrete provided matches the ENV, proceeed with the app
#if password == secret:
# hide_menu_style = """
# <style>
#MainMenu {visibility: hidden;}
# </style>
# """
#st.markdown(hide_menu_style, unsafe_allow_html=True)
###########################
# Models and base functions
###########################
def sir(
s: float, i: float, r: float, beta: float, gamma: float, n: float
) -> Tuple[float, float, float]:
"""The SIR model, one time step."""
s_n = (-beta * s * i) + s
i_n = (beta * s * i - gamma * i) + i
r_n = gamma * i + r
if s_n < 0.0:
s_n = 0.0
if i_n < 0.0:
i_n = 0.0
if r_n < 0.0:
r_n = 0.0
scale = n / (s_n + i_n + r_n)
return s_n * scale, i_n * scale, r_n * scale
def gen_sir(
s: float, i: float, r: float, beta: float, gamma: float, n_days: int
) -> Generator[Tuple[float, float, float], None, None]:
"""Simulate SIR model forward in time yielding tuples."""
s, i, r = (float(v) for v in (s, i, r))
n = s + i + r
for _ in range(n_days + 1):
yield s, i, r
s, i, r = sir(s, i, r, beta, gamma, n)
def sim_sir(
s: float, i: float, r: float, beta: float, gamma: float, n_days: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, i, r = (float(v) for v in (s, i, r))
n = s + i + r
s_v, i_v, r_v = [s], [i], [r]
for day in range(n_days):
s, i, r = sir(s, i, r, beta, gamma, n)
s_v.append(s)
i_v.append(i)
r_v.append(r)
return (
np.array(s_v),
np.array(i_v),
np.array(r_v),
)
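# Illustrative only (hypothetical numbers): for a population of 1,000,000 with a
# single seed infection, a call such as
#   s_v, i_v, r_v = sim_sir(999_999.0, 1.0, 0.0, beta=0.25 / 1_000_000, gamma=1 / 14, n_days=100)
# returns three arrays of length n_days + 1 tracing susceptible, infected and
# recovered counts; here beta is a per-capita contact rate and gamma the inverse
# of an assumed 14-day infectious period.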
def sim_sir_df(
p) -> pd.DataFrame:
"""Simulate the SIR model forward in time.
    p is a Parameters instance. For circular dependency reasons it can't be annotated here.
"""
return pd.DataFrame(
data=gen_sir(S, total_infections, recovered, beta, gamma, n_days),
columns=("Susceptible", "Infected", "Recovered"),
)
def get_dispositions(
patient_state: np.ndarray, rates: Tuple[float, ...], regional_hosp_share: float = 1.0
) -> Tuple[np.ndarray, ...]:
"""Get dispositions of infected adjusted by rate and market_share."""
return (*(patient_state * rate * regional_hosp_share for rate in rates),)
def build_admissions_df(
dispositions) -> pd.DataFrame:
"""Build admissions dataframe from Parameters."""
days = np.array(range(0, n_days + 1))
data_dict = dict(
zip(
["day", "hosp", "icu", "vent"],
[days] + [disposition for disposition in dispositions],
)
)
projection = pd.DataFrame.from_dict(data_dict)
counter = 0
for i in hosp_list:
projection[groups[0]+"_"+i] = projection.hosp*bed_share.iloc[3,counter]
projection[groups[1]+"_"+i] = projection.icu*bed_share.iloc[3,counter]
projection[groups[2]+"_"+i] = projection.vent*bed_share.iloc[3,counter]
counter +=1
if counter == 4: break
# New cases
projection_admits = projection.iloc[:-1, :] - projection.shift(1)
projection_admits["day"] = range(projection_admits.shape[0])
return projection_admits
def build_admissions_df_n(
dispositions) -> pd.DataFrame:
"""Build admissions dataframe from Parameters."""
days = np.array(range(0, n_days))
data_dict = dict(
zip(
["day", "hosp", "icu", "vent"],
[days] + [disposition for disposition in dispositions],
)
)
projection = pd.DataFrame.from_dict(data_dict)
counter = 0
for i in hosp_list:
projection[groups[0]+"_"+i] = projection.hosp*bed_share.iloc[3,counter]
projection[groups[1]+"_"+i] = projection.icu*bed_share.iloc[3,counter]
projection[groups[2]+"_"+i] = projection.vent*bed_share.iloc[3,counter]
counter +=1
if counter == 4: break
# New cases
projection_admits = projection.iloc[:-1, :] - projection.shift(1)
projection_admits["day"] = range(projection_admits.shape[0])
return projection_admits
def build_prev_df_n(
dispositions) -> pd.DataFrame:
"""Build admissions dataframe from Parameters."""
days = np.array(range(0, n_days))
data_dict = dict(
zip(
["day", "hosp", "icu", "vent"],
[days] + [disposition for disposition in dispositions],
)
)
projection = pd.DataFrame.from_dict(data_dict)
counter = 0
for i in hosp_list:
projection[groups[0]+"_"+i] = projection.hosp*bed_share.iloc[3,counter]
projection[groups[1]+"_"+i] = projection.icu*bed_share.iloc[3,counter]
projection[groups[2]+"_"+i] = projection.vent*bed_share.iloc[3,counter]
counter +=1
if counter == 4: break
# New cases
projection_admits = projection.iloc[:-1, :] - projection.shift(1)
projection_admits["day"] = range(projection_admits.shape[0])
return projection_admits
def build_census_df(
projection_admits: pd.DataFrame) -> pd.DataFrame:
"""ALOS for each category of COVID-19 case (total guesses)"""
n_days = np.shape(projection_admits)[0]
los_dict = {
"hosp": hosp_los, "icu": icu_los, "vent": vent_los}
census_dict = dict()
for k, los in los_dict.items():
census = (
projection_admits.cumsum().iloc[:-los, :]
- projection_admits.cumsum().shift(los).fillna(0)
).apply(np.ceil)
census_dict[k] = census[k]
census_df = pd.DataFrame(census_dict)
census_df["day"] = census_df.index
census_df = census_df[["day", "hosp", "icu", "vent"]]
census_df = census_df.head(n_days-10)
return census_df
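# Note on the census calculation above: subtracting the admissions series lagged by
# the length of stay from the cumulative admissions gives a rolling sum of the last
# `los` days of admissions, i.e. everyone assumed to still occupy a bed. With
# hosp_los = 7 (hypothetical), the day-d hospital census is the sum of admissions
# over days d-6 through d.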
def seir(
s: float, e: float, i: float, r: float, beta: float, gamma: float, alpha: float, n: float
) -> Tuple[float, float, float, float]:
"""The SIR model, one time step."""
s_n = (-beta * s * i) + s
e_n = (beta * s * i) - alpha * e + e
i_n = (alpha * e - gamma * i) + i
r_n = gamma * i + r
if s_n < 0.0:
s_n = 0.0
if e_n < 0.0:
e_n = 0.0
if i_n < 0.0:
i_n = 0.0
if r_n < 0.0:
r_n = 0.0
scale = n / (s_n + e_n+ i_n + r_n)
return s_n * scale, e_n * scale, i_n * scale, r_n * scale
def sim_seir(
s: float, e:float, i: float, r: float, beta: float, gamma: float, alpha: float, n_days: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, r = (float(v) for v in (s, e, i, r))
n = s + e + i + r
s_v, e_v, i_v, r_v = [s], [e], [i], [r]
for day in range(n_days):
s, e, i, r = seir(s, e, i, r, beta, gamma, alpha, n)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
)
def gen_seir(
s: float, e: float, i: float, r: float, beta: float, gamma: float, alpha: float, n_days: int
) -> Generator[Tuple[float, float, float, float], None, None]:
"""Simulate SIR model forward in time yielding tuples."""
s, e, i, r = (float(v) for v in (s, e, i, r))
n = s + e + i + r
for _ in range(n_days + 1):
yield s, e, i, r
s, e, i, r = seir(s, e, i, r, beta, gamma, alpha, n)
# phase-adjusted https://www.nature.com/articles/s41421-020-0148-0
def sim_seir_decay(
s: float, e:float, i: float, r: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Simulate the SIR model forward in time."""
s, e, i, r = (float(v) for v in (s, e, i, r))
n = s + e + i + r
s_v, e_v, i_v, r_v = [s], [e], [i], [r]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=day<=int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=day<=step1_delta:
beta_decay=beta*(1-decay3)
else:
beta_decay=beta*(1-decay4)
s, e, i, r = seir(s, e, i, r, beta_decay, gamma, alpha, n)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
)
def seird(
s: float, e: float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n: float, fatal: float
) -> Tuple[float, float, float, float, float]:
    """The SEIRD model, one time step."""
s_n = (-beta * s * i) + s
e_n = (beta * s * i) - alpha * e + e
i_n = (alpha * e - gamma * i) + i
r_n = (1-fatal)*gamma * i + r
d_n = (fatal)*gamma * i +d
if s_n < 0.0:
s_n = 0.0
if e_n < 0.0:
e_n = 0.0
if i_n < 0.0:
i_n = 0.0
if r_n < 0.0:
r_n = 0.0
if d_n < 0.0:
d_n = 0.0
scale = n / (s_n + e_n+ i_n + r_n + d_n)
return s_n * scale, e_n * scale, i_n * scale, r_n * scale, d_n * scale
def sim_seird_decay(
s: float, e:float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Simulate the SEIRD model forward in time."""
s, e, i, r, d= (float(v) for v in (s, e, i, r, d))
n = s + e + i + r + d
s_v, e_v, i_v, r_v, d_v = [s], [e], [i], [r], [d]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=day<=int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=day<=step1_delta:
beta_decay=beta*(1-decay3)
else:
beta_decay=beta*(1-decay4)
s, e, i, r,d = seird(s, e, i, r, d, beta_decay, gamma, alpha, n, fatal)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
np.array(d_v)
)
# Model with high social distancing
def sim_seird_decay_social(
s: float, e:float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Simulate the SEIRD model forward in time."""
s, e, i, r, d= (float(v) for v in (s, e, i, r, d))
n = s + e + i + r + d
s_v, e_v, i_v, r_v, d_v = [s], [e], [i], [r], [d]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1) + (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.02)
elif int1_delta<=day<=int2_delta:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.52)
elif int2_delta<=day<=step1_delta:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.83)
else:
beta = (alpha+(2 ** (1 / 2) - 1))*((2 ** (1 / 2) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.73)
s, e, i, r,d = seird(s, e, i, r, d, beta_decay, gamma, alpha, n, fatal)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
np.array(d_v)
)
# Model with dynamic doubling time
def sim_seird_decay_erie(
s: float, e:float, i: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Simulate the SEIRD model forward in time."""
s, e, i, r, d= (float(v) for v in (s, e, i, r, d))
n = s + e + i + r + d
s_v, e_v, i_v, r_v, d_v = [s], [e], [i], [r], [d]
for day in range(n_days):
if start_day<=day<=int1_delta:
beta = (alpha+(2 ** (1 / 1.61) - 1))*((2 ** (1 / 1.61) - 1) + (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.3)
elif int1_delta<=day<=int2_delta:
beta = (alpha+(2 ** (1 / 2.65) - 1))*((2 ** (1 / 2.65) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.3)
elif int2_delta<=day<=step1_delta:
beta = (alpha+(2 ** (1 / 5.32) - 1))*((2 ** (1 / 5.32) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.5)
else:
beta = (alpha+(2 ** (1 / 9.70) - 1))*((2 ** (1 / 9.70) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-.30)
s, e, i, r,d = seird(s, e, i, r, d, beta_decay, gamma, alpha, n, fatal)
s_v.append(s)
e_v.append(e)
i_v.append(i)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(r_v),
np.array(d_v)
)
def seijcrd(
s: float, e: float, i: float, j:float, c:float, r: float, d: float, beta: float, gamma: float, alpha: float, n: float, fatal_hosp: float, hosp_rate:float, icu_rate:float, icu_days:float,crit_lag:float, death_days:float
) -> Tuple[float, float, float, float, float, float, float]:
    """The SEIJCRD model, one time step."""
s_n = (-beta * s * (i+j+c)) + s
e_n = (beta * s * (i+j+c)) - alpha * e + e
i_n = (alpha * e - gamma * i) + i
j_n = hosp_rate * i * gamma + (1-icu_rate)* c *icu_days + j
c_n = icu_rate * j * (1/crit_lag) - c * (1/death_days)
r_n = (1-hosp_rate)*gamma * i + (1-icu_rate) * (1/crit_lag)* j + r
d_n = (fatal_hosp)* c * (1/crit_lag)+d
if s_n < 0.0:
s_n = 0.0
if e_n < 0.0:
e_n = 0.0
if i_n < 0.0:
i_n = 0.0
if j_n < 0.0:
j_n = 0.0
if c_n < 0.0:
c_n = 0.0
if r_n < 0.0:
r_n = 0.0
if d_n < 0.0:
d_n = 0.0
scale = n / (s_n + e_n+ i_n + j_n+ c_n+ r_n + d_n)
return s_n * scale, e_n * scale, i_n * scale, j_n* scale, c_n*scale, r_n * scale, d_n * scale
def sim_seijcrd_decay(
s: float, e:float, i: float, j:float, c: float, r: float, d: float, beta: float, gamma: float, alpha: float, n_days: int,
decay1:float, decay2:float, decay3: float, decay4: float, step1_delta: int, fatal_hosp: float, hosp_rate: float, icu_rate: float, icu_days:float, crit_lag: float, death_days:float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Simulate the SEIJCRD model forward in time."""
s, e, i, j, c, r, d= (float(v) for v in (s, e, i, c, j, r, d))
n = s + e + i + j+r + d
s_v, e_v, i_v, j_v, c_v, r_v, d_v = [s], [e], [i], [j], [c], [r], [d]
for day in range(n_days):
if 0<=day<=21:
beta = (alpha+(2 ** (1 / 1.61) - 1))*((2 ** (1 / 1.61) - 1) + (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay1)
elif 22<=day<=28:
beta = (alpha+(2 ** (1 / 2.65) - 1))*((2 ** (1 / 2.65) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay2)
elif 29<=day<=step1_delta:
beta = (alpha+(2 ** (1 / 5.32) - 1))*((2 ** (1 / 5.32) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay3)
else:
beta = (alpha+(2 ** (1 / 9.70) - 1))*((2 ** (1 / 9.70) - 1)+ (1/infectious_period)) / (alpha*S)
beta_decay=beta*(1-decay4)
s, e, i,j, c, r,d = seijcrd(s, e, i,j, c, r, d, beta_decay, gamma, alpha, n, fatal_hosp, hosp_rate, icu_rate, icu_days, crit_lag, death_days)
s_v.append(s)
e_v.append(e)
i_v.append(i)
j_v.append(j)
c_v.append(c)
r_v.append(r)
d_v.append(d)
return (
np.array(s_v),
np.array(e_v),
np.array(i_v),
np.array(j_v),
np.array(c_v),
np.array(r_v),
np.array(d_v)
)
def betanew(t,beta):
if start_day<= t <= int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=t<int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=t<int3_delta:
beta_decay=beta*(1-decay3)
elif int3_delta<=t<=step1_delta:
beta_decay=beta*(1-decay4)
elif step1_delta<=t<=step2_delta:
beta_decay=beta*(1-decay5)
else:
beta_decay=beta*(1-decay6)
return beta_decay
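# betanew encodes the social-distancing policy as a piecewise-constant transmission
# rate: each calendar window between the intervention dates (start_day, int1_delta,
# int2_delta, ..., step2_delta) applies its own reduction factor (1 - decayN).
# For example (hypothetical values), with beta = 0.3 and decay2 = 0.5, any t falling
# in the second window gives betanew(t, 0.3) == 0.15.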
# The SEAIJRD model differential equations with ODE solver.
def derivdecay(y, t, N, beta, gamma1, gamma2, alpha, p, hosp, q, l, n_days, decay1, decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta, fatal_hosp ):
S, E, A, I,J, R,D,counter = y
dSdt = - betanew(t, beta) * S * (q*I + l*J + A)/N
dEdt = betanew(t, beta) * S * (q*I + l*J + A)/N - alpha * E
dAdt = alpha * E*(1-p)-gamma1*A
dIdt = p* alpha* E - gamma1 * I- hosp*I
dJdt = hosp * I -gamma2*J
dRdt = (1-fatal_hosp)*gamma2 * J + gamma1*(A+I)
dDdt = fatal_hosp * gamma2 * J
counter = (1-fatal_hosp)*gamma2 * J
return dSdt, dEdt,dAdt, dIdt, dJdt, dRdt, dDdt, counter
def sim_seaijrd_decay_ode(
s, e,a,i, j,r, d, beta, gamma1, gamma2, alpha, n_days, decay1, decay2,decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, p, hosp, q,
l):
n = s + e + a + i + j+ r + d
rh=0
y0= s,e,a,i,j,r,d, rh
t=np.arange(0, n_days, step=1)
ret = odeint(derivdecay, y0, t, args=(n, beta, gamma1, gamma2, alpha, p, hosp,q,l, n_days, decay1, decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta, fatal_hosp))
S_n, E_n,A_n, I_n,J_n, R_n, D_n ,RH_n= ret.T
return (S_n, E_n,A_n, I_n,J_n, R_n, D_n, RH_n)
#### The SEPAIJRD model differential equations with ODE solver: presymptomatic compartment and masks
def betanew2(t, beta, x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8):
if start_day<= t <= int1_delta:
beta_decay=beta*(1-decay1)
elif int1_delta<=t<int2_delta:
beta_decay=beta*(1-decay2)
elif int2_delta<=t<int3_delta:
beta_decay=beta*(1-decay3)
elif int3_delta<=t<=step1_delta:
beta_decay=beta*(1-decay4)*(1-(x*p_m1))**2
elif step1_delta<=t<=step2_delta:
beta_decay=beta*(1-decay5)*(1-(x*p_m2))**2
elif step2_delta<=t<=step3_delta:
beta_decay=beta*(1-decay6)*(1-(x*p_m3))**2
elif step3_delta<=t<=step4_delta:
beta_decay=beta*(1-decay7)*(1-(x*p_m4))**2
elif step4_delta<=t<=step5_delta:
beta_decay=beta*(1-decay8)*(1-(x*p_m5))**2
elif step5_delta<=t<=step6_delta:
beta_decay=beta*(1-decay9)*(1-(x*p_m6))**2
elif step6_delta<=t<=step7_delta:
beta_decay=beta*(1-decay10)*(1-(x*p_m7))**2
else:
beta_decay=beta*(1-decay11)*(1-(x*p_m8))**2
return beta_decay
def derivdecayP(y, t, beta, gamma1, gamma2, alpha, sym, hosp,q,l,n_days, decay1,decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8, delta_p ):
S, E, P,A, I,J, R,D,counter = y
N=S+E+P+A+I+J+R+D
dSdt = - betanew2(t, beta, x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8) * S * (q*I + l*J +P+ A)/N
dEdt = betanew2(t, beta, x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8) * S * (q*I + l*J +P+ A)/N - alpha * E
dPdt = alpha * E - delta_p * P
dAdt = delta_p* P *(1-sym)-gamma1*A
dIdt = sym* delta_p* P - gamma1 * I- hosp*I
dJdt = hosp * I -gamma2*J
dRdt = (1-fatal_hosp)*gamma2 * J + gamma1*(A+I)
dDdt = fatal_hosp * gamma2 * J
counter = (1-fatal_hosp)*gamma2 * J
return dSdt, dEdt,dPdt,dAdt, dIdt, dJdt, dRdt, dDdt, counter
def sim_sepaijrd_decay_ode(
s, e,p,a,i, j,r, d, beta, gamma1, gamma2, alpha, n_days,decay1,decay2,decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, sym, hosp, q,
l,x, p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8, delta_p):
n = s + e + p+a + i + j+ r + d
rh=0
y0= s,e,p,a,i,j,r,d, rh
t=np.arange(0, n_days, step=1)
ret = odeint(derivdecayP, y0, t, args=(beta, gamma1, gamma2, alpha, sym, hosp,q,l, n_days, decay1, decay2, decay3, decay4, decay5, decay6, decay7, decay8, decay9, decay10, decay11,
start_day, int1_delta, int2_delta, step1_delta, step2_delta, step3_delta, step4_delta, step5_delta, step6_delta, step7_delta,
fatal_hosp, x,
p_m1, p_m2, p_m3, p_m4, p_m5, p_m6, p_m7, p_m8, delta_p))
S_n, E_n,P_n,A_n, I_n,J_n, R_n, D_n ,RH_n= ret.T
return (S_n, E_n,P_n,A_n, I_n,J_n, R_n, D_n, RH_n)
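# Compartment naming used by the ODE models above (as read from the equations):
# S susceptible, E exposed, P presymptomatic (second model only), A asymptomatic
# infectious, I symptomatic infectious, J hospitalized, R recovered, D dead; the
# trailing "counter" term accumulates non-fatal hospital exits (discharges).
# q and l scale how strongly the symptomatic and hospitalized compartments
# contribute to transmission, and x with p_m1..p_m8 appears to model mask
# uptake/effectiveness applied from the later reopening phases onward.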
# End Models #
# Add dates #
def add_date_column(
df: pd.DataFrame, drop_day_column: bool = False, date_format: Optional[str] = None,
) -> pd.DataFrame:
"""Copies input data frame and converts "day" column to "date" column
Assumes that day=0 is today and allocates dates for each integer day.
    Day range need not be continuous.
    Columns will be organized as in the original frame, except that the date
    column comes first.
Arguments:
df: The data frame to convert.
drop_day_column: If true, the returned data frame will not have a day column.
        date_format: If given, converts datetime objects to the specified string format.
Raises:
KeyError: if "day" column not in df
ValueError: if "day" column is not of type int
"""
if not "day" in df:
raise KeyError("Input data frame for converting dates has no 'day column'.")
if not | pd.api.types.is_integer_dtype(df.day) | pandas.api.types.is_integer_dtype |
import os
import pandas as pd
from google.cloud import storage
# environment variables
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "../auth/credential.json"
def load_data_from_gcs(bucket_name="pj_horidasimono", prefix="dataset/train/ElectricalAppliance"):
client = storage.Client()
blobs = client.list_blobs(bucket_name, prefix=prefix)
df = pd.DataFrame()
for blob in blobs:
bucket = client.get_bucket(bucket_name)
r = storage.Blob(blob.name, bucket)
content = r.download_as_string()
df = df.append( | pd.read_json(content) | pandas.read_json |
import math
# from datetime import timedelta, datetime
from itertools import combinations
from datetime import datetime
import numpy as np
import pandas as pd
import scipy.stats as stats
from sklearn import linear_model
import matplotlib.pyplot as plt
# https://zhuanlan.zhihu.com/p/37605060
# https://realpython.com/numpy-scipy-pandas-correlation-python/
# PieceWiseLinearRegressionContext, options for PieceWiseLinearRegression
class PieceWiseLinearRegressionContext:
def __init__(
self,
recordNumber,
minSegmentCount=1,
maxSegmentCount=5,
maxFirstInflection=None,
minLastInflection=None,
ceofDiffEpsilon=0.00001,
ceofThreshold=0.99,
rollingWinSize=5,
fixedPointRate=0.3,
numPotentialInflection=15,
debugLevel=0):
self.N = recordNumber
self.minSegmentCount = minSegmentCount
self.maxSegmentCount = maxSegmentCount
self.epsilon = ceofDiffEpsilon
self.threshold = ceofThreshold
# default: to test all possibility of potential Inflections.
# [t[0], maxFirstInflection]: valid scope for first potential Inflections.
# [minLatInflection(2), t[-1]]: valid scope for last potential Inflections.
if maxFirstInflection is None:
self.maxFirstInflection = self.N - 2
else:
self.maxFirstInflection = maxFirstInflection
if minLastInflection is None:
self.minLastInflection = 1
else:
self.minLastInflection = minLastInflection
self.rollingWinSize = rollingWinSize
self.fixedPointRate = fixedPointRate
self.numPotentialInflection = numPotentialInflection
self.debug = debugLevel
class PieceWiseLinearResult:
def __init__(
self,
corcoef,
regcoefs,
inflectionPoints,
generalCorcoef,
generalPvalue,
psCorcoefs,
psPvalues,
yPred):
self.corcoef = corcoef
self.regcoefs = regcoefs
self.inflectionPoints = inflectionPoints
self.generalCorcoef = generalCorcoef
self.generalPvalue = generalPvalue
self.psCorcoefs = psCorcoefs
self.psPvalues = psPvalues
self.yPred = yPred
class PieceWiseLinearRegression:
def __init__(
self,
t, y, potentialInflectionPoints,
context):
"""
t, y with shape of (N, 1)
"""
self.y = y
self.t = t
self.potentialInflectionPoints = potentialInflectionPoints
self.ctx = context
def fit(self):
# N = len(self.t)
        epsilon = self.ctx.epsilon
        threshold = self.ctx.threshold
        # M segments, c[1, ..., M-1]
        minSegCnt = self.ctx.minSegmentCount
        maxSegCnt = self.ctx.maxSegmentCount
        isDebug = self.ctx.debug
M = maxSegCnt
cr = [0] * (M + 1) # for storing max correlation coefficient at M
rr = [[]] * (M + 1) # for storing max regression coefficient at M
yps = [[]] * (M + 1) # for storing predictions with max correlation coefficient at M
ips = [[]] * (M + 1) # for storing inflection points at M
        ts = [0] * (M + 1) # for storing time consumed at M
sc = M # for storing section count
cr[0] = - math.inf
t1 = datetime.now()
for M in range(minSegCnt, maxSegCnt + 1):
cr[M], rr[M], ips[M], yps[M] = self.MaxCorrcoef(M)
sc = M
# level 1 debug for time and max corrcoef at M segments.
if isDebug > 0:
print("current M segments: {0}".format(M))
t2 = datetime.now()
                ts[M] = (t2-t1).seconds
                t1 = t2
                print("{0} pieces, consuming {1} seconds:".format(M, ts[M]))
print("max corrcoef {0} & breaks at {1}:\n".format(cr[M], ips[M]))
# stop iterating by following condition:
# 1. when max corrcoef is close to 1 (over threshold)
if abs(cr[M]) > threshold:
if isDebug > 0:
print("{0} piecewise and split points:{1} max corrcoef {2}.\
\n".format(M, ips[M], cr[M]))
r_general, p_values_general, r_pw, p_values_pw = self.calculateCeofsByPiece(yps[M], ips[M])
# print("abs of cor_coefs: ", cor_coefs)
if min(r_pw) > threshold:
sc = M
break
# 2. when corrcoef varies small enough
if abs(cr[M] - cr[M - 1]) < epsilon:
if isDebug > 0:
print("{0} piecewise and split points:{1} with max corrcoef \
{2}:{3}. \n".format(M-1, ips[M-1], cr[M-1], cr[M]))
# return cr[M-1], c[M-1]
sc = M - 1
break
r_general, p_values_general, r_pw, p_values_pw = self.calculateCeofsByPiece(yps[sc], ips[sc])
pwRes = PieceWiseLinearResult(cr[sc],
rr[sc],
ips[sc],
r_general,
p_values_general,
r_pw,
p_values_pw,
yps[sc])
return pwRes
def MaxCorrcoef(self, M):
max_c = None
max_cor_coef = -1
max_reg_coefs = None
predictions = []
if M == 1:
max_cor_ceof, max_reg_coefs, predictions = self.calculateMultipleLinearRegression([])
return max_cor_ceof, max_reg_coefs , [], predictions
cs = combinations(self.potentialInflectionPoints, M - 1)
maxFirstInflection = self.ctx.maxFirstInflection
minLastInflection = self.ctx.minLastInflection
isDebug = self.ctx.debug
# index = 0
for c in cs:
if (c[0] == self.t[0]
or c[0] > maxFirstInflection
or c[-1] < minLastInflection
                    or c[-1] == self.t[self.ctx.N-1]):
continue
cor_coef,reg_coef, tmp_predictions = self.calculateMultipleLinearRegression(c)
if cor_coef > max_cor_coef:
max_cor_coef = cor_coef
max_reg_coefs = reg_coef
predictions = tmp_predictions
max_c = c
# debug level 2 for print internal max coef at specific M
if isDebug > 1:
print(c, cor_coef)
print(max_reg_coefs)
return max_cor_coef, max_reg_coefs, max_c, predictions
def calculateMultipleLinearRegression(self, c):
# M = len(c) + 1
N = len(self.y)
cor_ceof = None
reg_ceofs = None
cc = np.concatenate((self.t[:1], np.array(c), self.t[-1:]), axis=0)
if self.ctx.debug > 0:
print(cc)
X = self.setupVirtualX(self.t, cc)
Y = self.y.reshape((N, 1))
lm = linear_model.LinearRegression()
lm.fit(X, Y)
reg_ceofs = lm.coef_.flatten()
predictions = lm.predict(X).flatten()
cor_ceof = np.corrcoef(predictions, self.y)
return cor_ceof[0, 1], reg_ceofs, predictions
# T[0, 1, ..., N-1, N]
# C[0, 1, ..., M-1, M]
def setupVirtualX(self, T, C):
# N = len(t)
M = len(C)
TT = T.reshape((-1, 1))
# print(TT)
def vFunc(vArr):
t = vArr[0]
x = np.zeros(M)
x[0] = 1
for j in range(1, M): # test where is t located [ *, C[j-1], *, C[j], *]
if t > C[j]:
x[j] = C[j] - C[j-1]
elif t < C[j -1]:
break
else: # C[j-1] <= t <= C[j]
x[j] = t - C[j-1]
return x
X = np.apply_along_axis(vFunc, 1, TT)
# print(C, X)
return X[:,1:]
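    # How the virtual design matrix works: for breakpoints C, column j measures how
    # far t has progressed inside segment (C[j-1], C[j]], saturating at the full
    # segment length once t passes C[j]. A single least-squares fit on these columns
    # yields one slope per segment while keeping the fitted curve continuous at the
    # breakpoints. Illustration (hypothetical): with C = [0, 2, 5] and t = 4 the row
    # is [2, 2] -- segment one fully traversed, two units into segment two.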
    # calculate corrcoef piece by piece
def calculateCeofsByPiece(self, ps, c):
M = len(c) + 1
rs = [0] * M
p_values = [0] * M
cc = np.concatenate((np.array(c), self.t[-1:]), axis=0)
# print("cc:", cc)
# print("t: ", self.t)
# print("y: ", self.y)
# print("yp: ", ps)
p0 = self.t[:1]
# print("calculateCeofsByPiece:", cc, ps)
for i in range(M):
p1 = cc[i]
indexs = np.logical_and(self.t >= p0 , self.t <= p1)
rs[i], p_values[i] = stats.pearsonr(ps[indexs], self.y[indexs])
# print("piecewise {0}.".format(self.t[indexs]))
# print("y: {0}, yp:{1}.".format(self.y[indexs], ps[indexs]))
# print("corrcoef: {0}, p_value:{1}.\n".format(rs[i], p_values[i]))
p0 = p1
r_tatal, p_values_tatal = stats.pearsonr(ps, self.y)
return r_tatal, p_values_tatal, rs, p_values
# input: df -- pandas.DataFrame with columns [T, Y]
# caution: T -- [1, 2, ..., N]
def doPieceWise(df, ctx):
if ctx.numPotentialInflection in [None, 0, ctx.N]:
potentialInflections = df['T'][1:-1].to_numpy()
else:
doMovingAverages(df, ctx)
nlargest_row = df.nlargest(ctx.numPotentialInflection, 'Y_GRD')
df['NLG_GRD'] = nlargest_row['Y_AVG']
nlargest_day = np.sort(nlargest_row['T'].to_numpy())
potentialInflections = nlargest_day
if ctx.debug:
print("-----------------------------------------")
print("potential inflection points", potentialInflections)
t = df['T'].to_numpy()
y = df['Y'].to_numpy()
pwlr = PieceWiseLinearRegression(t, y, potentialInflections, ctx)
pwRes = pwlr.fit()
df.loc[:, ['Y_PRED']] = pwRes.yPred
return pwRes
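# Candidate breakpoints are chosen from the largest magnitudes of the discrete second
# derivative of the smoothed series (Y_GRD): points where the rolling average bends
# most sharply are the most plausible inflection points, which keeps the exhaustive
# combination search in MaxCorrcoef tractable.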
# helper functions
#
def doMovingAverages(df, ctx):
df['Y_AVG'] = df['Y'].rolling(
window=ctx.rollingWinSize, min_periods=1, center=True).mean()
(row, _) = df.shape
numFixPoint = int(row * ctx.fixedPointRate)
df['Y_DELTA'] = abs(df['Y_AVG'] - df['Y'])
reset_threshold = df['Y_DELTA'].nlargest(numFixPoint).min()
df.loc[df['Y_DELTA'] >= reset_threshold, 'Y_AVG'] = df['Y']
y_avg = df['Y_AVG'].to_numpy()
y_avg_grd = np.abs(np.gradient(np.gradient(y_avg)))
df['Y_GRD'] = y_avg_grd
# print(df)
# display df with T, Y, Y_PRED fields
def doDisplay(df):
# the sie of figure, unit: inch
FIG_SIZE = (16, 12)
# Plot outputs
(cn_row, cn_col) = df.shape
x = df['T']
y = df['Y']
y_pred = df['Y_PRED']
plt.figure(figsize=FIG_SIZE)
plt.scatter(y, y_pred)
plt.figure(figsize=FIG_SIZE)
# plt.plot(x, ndvi, color='blue', linewidth=1)
plt.grid(b=True, which='both')
    # grid(b=None, which='major', axis='both', **kwargs)
y_line, = plt.plot(x, y, color='red', label="Y", linewidth=1)
y_pred_line, = plt.plot(
x, y_pred, color='blue', label="Y_PRED", linewidth=1)
plt.scatter(x, y, marker="D")
plt.scatter(x, y_pred, marker='o')
handles = [y_line, y_pred_line]
plt.legend(handles=handles, loc='upper right')
def exportResult(filepath, psRes, df):
msgs = []
msg = "max_cor_coef: {0}\n".format(psRes.corcoef)
msgs.append(msg)
msg = "reg_coef: {0}\n".format(psRes.regcoefs)
msgs.append(msg)
msg = "inflection points: {0}\n".format(psRes.inflectionPoints)
msgs.append(msg)
msg = "general correlation coefficient: {0}\n".format(psRes.generalCorcoef)
msgs.append(msg)
msg = "eneral correlation p_values : {0}\n".format(psRes.generalPvalue)
msgs.append(msg)
msg = "cor_coef_piecewise: {0}\n".format(psRes.psCorcoefs)
msgs.append(msg)
msg = "p_values_piecewise: {0}\n".format(psRes.psPvalues)
msgs.append(msg)
with open(filepath, "w") as fo:
fo.writelines(msgs)
filepath = filepath.replace(".", ".det.")
df.to_csv(filepath, sep="\t", index=False, float_format='%10.6f')
if __name__ == "__main__":
#
DATA_LOADFROM_FILE = True
ENABLE_DISPLAY = True
# ------------------------------------------------
# load data
if DATA_LOADFROM_FILE:
input_filepath = "/Users/hurricane/share/data.txt"
df = | pd.read_csv(input_filepath, sep='\t') | pandas.read_csv |
"""Preprocessing WSDM Dataset.
Author: DHSong
Last Modified At: 2020.07.07
Preprocessing WSDM Dataset.
"""
import os
from collections import Counter
from tqdm import tqdm
import pandas as pd
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import seaborn as sns
class PreprocessingWorker:
"""Worker for Preprocessing.
Worker for Preprocessing.
Attributes:
train_raw: pandas Dataframe for Train Dataset(train.csv).
test_raw: pandas Dataframe for Train Dataset(test.csv).
sample_submission_raw: pandas Dataframe for Submission Dataset(sample_submission.csv).
songs_raw: pandas Dataframe for Song Dataset(songs.csv).
members_raw: pandas Dataframe for Member Dataset(members.csv).
song_extra_info_raw: pandas Dataframe for Additional Song Dataset(song_extra_info.csv).
"""
def __init__(self, data_dirname='./data', font_path='./static/fonts/D2Coding.ttc'):
"""Inits Dataframe for data in data directory."""
self._matplotlib_setting(font_path)
self.train_raw = pd.read_csv(os.path.join(data_dirname, 'train.csv'))
self.test_raw = pd.read_csv(os.path.join(data_dirname, 'test.csv'))
self.sample_submission_raw = pd.read_csv(os.path.join(data_dirname, 'sample_submission.csv'))
self.songs_raw = pd.read_csv(os.path.join(data_dirname, 'songs.csv'))
self.members_raw = pd.read_csv(os.path.join(data_dirname, 'members.csv'))
self.song_extra_info_raw = pd.read_csv(os.path.join(data_dirname, 'song_extra_info.csv'))
def _matplotlib_setting(self, font_path):
"""set matplotlib fonts and style."""
font_family = fm.FontProperties(fname=font_path).get_name()
plt.rcParams['font.family'] = font_family
plt.rcParams['font.size'] = 14
plt.style.use('seaborn-darkgrid')
def _barplot(self, df, column, horizontal=True):
plt.figure(figsize=(16, 9))
if horizontal:
sns.countplot(y=column, hue='target', data=df, order=df[column].value_counts().index)
else:
sns.countplot(x=column, hue='target', data=df, order=df[column].value_counts().index)
plt.title('{} Distribution by target'.format(column))
plt.legend(loc='upper right')
plt.savefig('./figures/preprocessing-barplot-{}'.format(column))
def preprocess_train_test(self):
"""Preprocess train.csv and test.csv.
preprocess train.csv and test.csv. Select values to be considered.
        Args:
Return:
train: Pandas Dataframe. Select values to be considered in train.csv.
test: Pandas Dataframe. Select values to be considered in test.csv.
"""
train = self.train_raw.fillna('<blank>')
test = self.test_raw.fillna('<blank>')
selected_values_by_columns = {
'source_system_tab': [
'<blank>', '<not selected>',
'my library', 'discover', 'search', 'radio'
],
'source_screen_name': [
'<blank>', '<not selected>',
'Local playlist more', 'Online playlist more', 'Radio',
'Album more', 'Search', 'Artist more', 'Discover Feature',
'Discover Chart', 'Others profile more'
],
'source_type': [
'<blank>', '<not selected>',
'local-library', 'online-playlist', 'local-playlist',
'radio', 'album', 'top-hits-for-artist'
]
}
for column, values in selected_values_by_columns.items():
train.loc[~train[column].isin(values), column] = '<not selected>'
test.loc[~test[column].isin(values), column] = '<not selected>'
for column in selected_values_by_columns.keys():
self._barplot(train, column)
return train, test
def preprocess_members(self):
"""Preprocess members.csv.
preprocess members.csv. Select values to be considered.
        Args:
Return:
members: Pandas Dataframe. Select values to be considered in members.csv.
"""
# fill all the NA with <blank>.
members = self.members_raw.fillna('<blank>')
# calculate membership days.
members['registration_init_time'] = pd.to_datetime(members.registration_init_time, format='%Y%m%d')
members['expiration_date'] = pd.to_datetime(members.expiration_date, format='%Y%m%d')
members['membership_days'] = (members.expiration_date - members.registration_init_time).dt.days
# binning membership days.
invalid_membership_days = members.membership_days < 0
members.loc[invalid_membership_days, 'membership_days'] = -1
        members.loc[invalid_membership_days, 'membership_days_bin'] = '<invalid>'
        members.loc[~invalid_membership_days, 'membership_days_bin'] = pd.qcut(members.loc[~invalid_membership_days, 'membership_days'], 3)
# binning bd(age).
invalid_bd = (members.bd < 0) | (members.bd >= 100)
members.loc[invalid_bd, 'bd'] = -1
members.loc[invalid_bd, 'bd_bin'] = '<invalid>'
members.loc[~invalid_bd, 'bd_bin'] = | pd.cut(members.loc[~invalid_bd, 'bd'], 5) | pandas.cut |
import json
from typing import Optional
import pandas as pd
from .api_methods import API
from .namespaces import symbols_in_namespace
def search(search_string: str, namespace: Optional[str] = None, response_format: str = 'frame') -> json:
# search for string in a single namespace
if namespace:
df = symbols_in_namespace(namespace.upper())
condition1 = df['name'].str.contains(search_string, case=False)
condition2 = df['ticker'].str.contains(search_string, case=False)
frame_response = df[condition1 | condition2]
if response_format.lower() == 'frame':
return frame_response
elif response_format.lower() == 'json':
return frame_response.to_json(orient='records')
else:
raise ValueError('response_format must be "json" or "frame"')
# search for string in all namespaces
string_response = API.search(search_string)
json_response = json.loads(string_response)
if response_format.lower() == 'frame':
df = | pd.DataFrame(json_response[1:], columns=json_response[0]) | pandas.DataFrame |
'''
Extracting Apple Watch Health Data
'''
import os
from datetime import datetime
from xml.dom import minidom
import numpy as np
import pandas as pd
class AppleWatchData(object):
'''
Object to contain all relevant data access calls for Apple Watch health data.
'''
# TODO: make parsing of xml file a helper function
def __init__(self, xml_data_file_path, source_name, tag_name='Record'):
"""
Class can be generalized to retrieve data from sources other than Apple Watch.
:param xml_data_file_path: local path to xml file exported by Health App on iPhone
:param source_name: source of health data (i.e. Apple Watch)
:param tag_name: xml tag to parse data from
"""
if xml_data_file_path.startswith('~'):
self.file_path = os.path.expanduser(xml_data_file_path)
else:
            self.file_path = xml_data_file_path
self.source_name = source_name
self.tag_name = tag_name
self.xmldoc = minidom.parse(self.file_path)
self.records = self.xmldoc.getElementsByTagName(self.tag_name)
def parse_tag(self, attribute):
"""
Filter for records in Health Data matching attribute name.
:param attribute: attribute name of xml Record tag
:return: a list of all records matching class's source name and attribute name
"""
record_list = []
for s in self.records:
found1 = s.attributes['type'].value == attribute
if self.source_name in 'Apple Watch':
self.source_name = self.source_name.replace('Apple Watch', u'Apple\xa0Watch')
found2 = self.source_name in s.attributes['sourceName'].value
# parse the record
if found1 and found2:
record_list.append(s)
return record_list
def parse_record(self, record):
"""
For a given record pull and start timestamp, end timestamp, and health data value.
:param record: xml object with tag name of Record
:return: Record's start timestamp, end timestamp, and biometric data
"""
# Extract start and end timestamps
start_timestamp_string = record.attributes['startDate'].value
end_timestamp_string = record.attributes['endDate'].value
try:
start_time = datetime.strptime(start_timestamp_string, '%Y-%m-%d %H:%M:%S -0500')
end_time = datetime.strptime(end_timestamp_string, '%Y-%m-%d %H:%M:%S -0500')
except ValueError:
start_time = datetime.strptime(start_timestamp_string, '%Y-%m-%d %H:%M:%S -0400')
end_time = datetime.strptime(end_timestamp_string, '%Y-%m-%d %H:%M:%S -0400')
# Extract biometric data
try:
# convert to float for numerical values
biometric = float(record.attributes['value'].value)
except:
biometric = record.attributes['value'].value
return start_time, end_time, biometric
def parse_record_list(self, record_list):
"""
Generate array of timestamps and data values returned by multiple records.
:param record_list: list of xml objects with tag name Record
:return: array of timestamps and data values returned by parse_record()
"""
        # vectorized extraction of record values
apple_data = list(map(lambda record: self.parse_record(record), record_list))
apple_array = np.array(apple_data)
return apple_array
def load_heart_rate_data(self):
"""
:return: data frame of instantaneous beats per minute and respective time stamps
"""
# count data
attribute = 'HKQuantityTypeIdentifierHeartRate'
record_list = self.parse_tag(attribute)
hr_data_df = pd.DataFrame()
# parse records
apple_array = self.parse_record_list(record_list)
hr_data_df['start_timestamp'] = apple_array[:, 0]
hr_data_df['end_timestamp'] = apple_array[:, 1]
hr_data_df['heart_rate'] = pd.to_numeric(apple_array[:, 2], errors='ignore')
# sort by start time
hr_data_df.sort_values('start_timestamp', inplace=True)
return hr_data_df
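    # Example usage (hypothetical export path):
    #   data = AppleWatchData('~/apple_health_export/export.xml', 'Apple Watch')
    #   hr_df = data.load_heart_rate_data()
    # hr_df then holds one row per heart-rate Record with start/end timestamps and
    # the heart-rate value (BPM), sorted by start_timestamp.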
def load_heart_rate_variability_data(self):
"""
:return: data frame of average standard deviation of NN (beat-to-beat) intervals and
instantaneous heart rate measures (BPM) used to derive this estimate
"""
# units of milliseconds
attribute = 'HKQuantityTypeIdentifierHeartRateVariabilitySDNN'
record_list = self.parse_tag(attribute)
hrv_data_df = pd.DataFrame()
# parse records
apple_array = self.parse_record_list(record_list)
# parse metadata list
instantaneous_bpm = []
for s in record_list:
meta_data = {'bpm': [], 'time': []}
nodes = s.childNodes[1].getElementsByTagName('InstantaneousBeatsPerMinute')
for node in nodes:
meta_data['bpm'].append(node.attributes['bpm'].value)
meta_data['time'].append(node.attributes['time'].value)
instantaneous_bpm.append(meta_data)
hrv_data_df['start_timestamp'] = apple_array[:, 0]
hrv_data_df['end_timestamp'] = apple_array[:, 1]
hrv_data_df['heart_rate_variability'] = pd.to_numeric(apple_array[:, 2], errors='ignore')
hrv_data_df['instantaneous_bpm'] = instantaneous_bpm
return hrv_data_df
def load_resting_heart_rate_data(self):
"""
:return: data frame of average resting heart rate (BPM) per diem
"""
# units of BPM
attribute = 'HKQuantityTypeIdentifierRestingHeartRate'
record_list = self.parse_tag(attribute)
resting_hr_data_df = pd.DataFrame()
# parse records
apple_array = self.parse_record_list(record_list)
resting_hr_data_df['start_timestamp'] = apple_array[:, 0]
resting_hr_data_df['end_timestamp'] = apple_array[:, 1]
resting_hr_data_df['resting_heart_rate'] = pd.to_numeric(apple_array[:, 2], errors='ignore')
# sort by start time
resting_hr_data_df.sort_values('start_timestamp', inplace=True)
return resting_hr_data_df
def load_walking_heart_rate_data(self):
"""
:return: data frame of average walking heart rate (BPM) per diem
"""
# units of BPM
attribute = 'HKQuantityTypeIdentifierWalkingHeartRateAverage'
record_list = self.parse_tag(attribute)
walking_hr_data_df = pd.DataFrame()
# parse records
apple_array = self.parse_record_list(record_list)
walking_hr_data_df['start_timestamp'] = apple_array[:, 0]
walking_hr_data_df['end_timestamp'] = apple_array[:, 1]
walking_hr_data_df['walking_heart_rate'] = | pd.to_numeric(apple_array[:, 2], errors='ignore') | pandas.to_numeric |
import tqdm
from offline.infra.netlink import NetLink
import pandas as pd
import numpy as np
from csv import QUOTE_ALL
from dataclasses import dataclass
from collections import namedtuple,defaultdict
from itertools import chain, combinations
from datetime import datetime
import functools
# Cross = namedtuple("Cross", "row_indicator col_indicator value is_quarterly")
Cross = namedtuple("Cross", "row_indicator col_indicator value")
# @dataclass
# class Cross:
# """
# crosses are parts of tables that include a central cell,
# and two cells from the same column/row
# that serve to identify the meaning of the central cell. For example:
# |
# |
# 2020 <--- year
# |
# |
# ---- revenue -- 1234.5 ----
# |
# |
# |
# in this example, the row indicator is "revenue", the col indicator is 2020,
# and the cell value is 1234.5
# """
# row_indicator: str
# col_indicator: str
# value: float
# is_quarterly : bool
class CrossBuilder:
def __init__(
self, row_keyword_regex, col_value_list, max_col_value, only_floats=True
):
self.row_keyword_regex = row_keyword_regex
self.col_value_list = [v for v in col_value_list if v <= max_col_value]
self.only_floats = only_floats
def _extract_row_indicator_places(self, df):
nrow = df.shape[0]
is_pattern_matched = [
df.iloc[x, :].str.contains(self.row_keyword_regex, case=False)
for x in range(nrow)
]
row_inds, col_inds = np.nonzero(is_pattern_matched)
return row_inds, col_inds
def _extract_col_indicator_places(self, df):
ncols = df.shape[1]
is_col_header_found = [
np.any(
[df.iloc[:, x].astype(str) == str(y) for y in self.col_value_list],
axis=0,
)
for x in range(ncols)
]
col_inds, row_inds = np.nonzero(
is_col_header_found
) # note that we iterated over columns
return row_inds, col_inds
def __call__(self, df):
df_str = df.applymap(lambda x: str(x or "")) # convert None to ""
row_indicator_places = self._extract_row_indicator_places(df_str)
col_indicator_places = self._extract_col_indicator_places(df_str)
crosses = []
for row_ind_i, row_ind_j in zip(*row_indicator_places):
for col_ind_i, col_ind_j in zip(*col_indicator_places):
number_found_is_the_indicator = row_ind_i <= col_ind_i
if number_found_is_the_indicator:
continue
try:
assert np.isfinite(float(df.iloc[row_ind_i, col_ind_j]))
except BaseException:
if self.only_floats:
continue
if self.only_floats:
crosses.append(
Cross(
df.iloc[row_ind_i, row_ind_j],
df.iloc[col_ind_i, col_ind_j],
float(df.iloc[row_ind_i, col_ind_j]),
)
)
else:
crosses.append(
Cross(
df.iloc[row_ind_i, row_ind_j],
df.iloc[col_ind_i, col_ind_j],
df.iloc[row_ind_i, col_ind_j],
)
)
return list(sorted(set(crosses), key=lambda x: x[1:]))
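# Example (hypothetical): pulling revenue figures for fiscal years up to 2020 out of
# a parsed table could look like
#   builder = CrossBuilder(r"revenue", col_value_list=[2018, 2019, 2020], max_col_value=2020)
#   crosses = builder(table_df)   # table_df: DataFrame of raw table cells
# Each Cross pairs the matched row label, the matched column header and the numeric
# value found at their intersection.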
class TableLink(NetLink):
entity_name = "table"
def _break_to_entities(self, entire_document):
all_tables = | pd.read_html(entire_document) | pandas.read_html |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in | range(2, 6) | pandas.compat.range |
# -*- coding: utf-8 -*-
"""
Map ReEDS geographic regions and classes to Supply Curve points
"""
import logging
import numpy as np
import os
import pandas as pd
from warnings import warn
from reVX.utilities.exceptions import ReedsValueError, ReedsKeyError
from reVX.utilities.utilities import log_versions
from rex.utilities.utilities import parse_table
logger = logging.getLogger(__name__)
class ReedsClassifier:
"""
Create ReEDS resource classes
"""
TABLE_OUT_COLS = ('sc_gid', 'region', 'class', 'bin', 'capacity',
'mean_lcoe', 'trans_cap_cost', 'total_lcoe')
AGG_TABLE_OUT_COLS = ('region', 'class', 'bin', 'capacity',
'trans_cap_cost', 'dist_mi')
def __init__(self, rev_table, resource_classes, region_map='reeds_region',
cap_bins=5, sort_bins_by='trans_cap_cost', pre_filter=None,
trg_by_region=False):
"""
Parameters
----------
rev_table : str | pandas.DataFrame
reV supply curve or aggregation table,
or path to file containing table
resource_classes : str | pandas.DataFrame
Resource classes, either provided in a .csv, .json or a DataFrame
Allowable columns:
- 'class' -> class labels to use
- 'TRG_cap' -> TRG capacity bins to use to create TRG classes
- any column in 'rev_table' -> Used for categorical bins
            - '*_min' and '*_max' where * is a numerical column in 'rev_table'
-> used for range binning
NOTE: 'TRG_cap' can only be combined with categorical bins
region_map : str | pandas.DataFrame
Mapping of supply curve points to region to create classes for
cap_bins : int
Number of equal capacity bins to create for each
region-class
sort_bins_by : str | list
Column(s) to sort by before capacity binning
pre_filter : dict | NoneType
Column value pair(s) to filter on. If None don't filter
trg_by_region : bool
Groupby on region when computing TRGs
"""
log_versions(logger)
rev_table = self._parse_table(rev_table)
if pre_filter is not None:
for col, v in pre_filter.items():
logger.debug('Subsetting reV table to {} in {}'
.format(v, col))
mask = rev_table[col] == v
rev_table = rev_table.loc[mask]
rev_table = self._map_region(rev_table, region_map)
rev_table = self._resource_classes(rev_table, resource_classes,
trg_by_region=trg_by_region)
self._rev_table = self._capacity_bins(rev_table, cap_bins,
sort_bins_by=sort_bins_by)
self._groups = self._rev_table.groupby(['region', 'class', 'bin'])
self._i = 0
def __repr__(self):
msg = ("{} contains {} region-class-class groups"
.format(self.__class__.__name__, len(self)))
return msg
def __len__(self):
return len(self._groups)
def __getitem__(self, key):
if key in self:
group = self._groups.get_group(key)
else:
msg = "{} is an invalid group:\n{}".format(key, self.keys)
logger.error(msg)
raise ReedsKeyError(msg)
return group
def __iter__(self):
return self
def __next__(self):
if self._i >= len(self):
self._i = 0
raise StopIteration
key = self.keys[self._i]
group = self[key]
self._i += 1
return group
def __contains__(self, key):
test = key in self._groups.groups
if not test:
msg = "{} does not exist in {}".format(key, self)
raise ReedsKeyError(msg)
return test
@property
def regions(self):
"""
Unique ReEDS geographic regions
Returns
-------
ndarray
"""
return np.sort(self._rev_table['region'].unique())
@property
def resource_classes(self):
"""
Unique ReEDS resource classes
Returns
-------
ndarray
"""
return np.sort(self._rev_table['class'].unique())
@property
def sc_bins(self):
"""
Unique ReEDS supply curve bins (clusters)
Returns
-------
ndarray
"""
return np.sort(self._rev_table['bin'].unique())
@property
def table(self):
"""
Supply curve or aggregation table
Returns
-------
pandas.DataFrame
"""
return self._rev_table
@property
def table_slim(self):
"""
Supply curve or aggregation table with only columns in TABLE_OUT_COLS
Returns
-------
pandas.DataFrame
"""
cols = [c for c in self.TABLE_OUT_COLS if c in self.table]
return self.table[cols]
@property
def keys(self):
"""
All unique group keys
Returns
-------
list
"""
return sorted(list(self._groups.groups.keys()))
@property
def region_class_bin_groups(self):
"""
All unique (region, class, bin) groups
Returns
-------
list
"""
return self.keys
@property
def groups(self):
"""
All unique group keys
Returns
-------
list
"""
return self.keys
@property
def aggregate_table(self):
"""
Region, class, bin aggregate table
Returns
-------
agg_table : pandas.DataFrame
"""
cols = ['area_sq_km', 'capacity', 'trans_capacity']
sum_table = self._groups[cols].sum()
cols = ['latitude', 'longitude', 'mean_cf', 'mean_lcoe', 'mean_res',
'trans_cap_cost', 'dist_mi', 'lcot', 'total_lcoe']
mean_table = self._groups[cols].mean()
agg_table = sum_table.join(mean_table)
return agg_table.reset_index()
@property
def aggregate_table_slim(self):
"""
Aggregate table with only columns in AGG_TABLE_OUT_COLS
Returns
-------
agg_table : pandas.DataFrame
"""
agg_table = self.aggregate_table
cols = [c for c in agg_table if c in self.AGG_TABLE_OUT_COLS]
return agg_table[cols]
@staticmethod
def _parse_table(input_table):
"""
Parse table from input argument
Parameters
----------
input_table : str | pandas.DataFrame
Input table to parse
Returns
-------
table : pandas.DataFrame
Parsed table
"""
try:
table = parse_table(input_table)
except ValueError as ex:
logger.error(ex)
raise
return table
@staticmethod
def _parse_region_map(region_map, rev_table):
"""
Parse region map from input arg
Parameters
----------
region_map : str | pandas.DataFrame | None
Mapping of supply curve points to region to create classes for
rev_table : pandas.DataFrame
reV supply curve or aggregation table
Returns
-------
region_map : pandas.DataFrame
Mapping of region to sc_gid
"""
if isinstance(region_map, str):
if os.path.isfile(region_map):
region_map = ReedsClassifier._parse_table(region_map)
elif region_map in rev_table:
region_map = rev_table[['sc_gid', region_map]].copy()
else:
msg = ('{} is not a valid file path or reV table '
                       'column label'.format(region_map))
logger.error(msg)
raise ReedsValueError(msg)
elif not isinstance(region_map, pd.DataFrame):
msg = ('Cannot parse region map from type {}'
.format(type(region_map)))
logger.error(msg)
raise ReedsValueError(msg)
return region_map
@staticmethod
def _map_region(rev_table, region_map=None):
"""
Map regions to sc points and append to rev_table
Parameters
----------
rev_table : pandas.DataFrame
reV supply curve or aggregation table
region_map : str | pandas.DataFrame | None
Mapping of supply curve points to region to create classes for
Returns
-------
rev_table : pandas.DataFrame
Updated table with region_id added
"""
if region_map is None:
rev_table['region'] = 1
else:
region_map = ReedsClassifier._parse_region_map(region_map,
rev_table)
if 'sc_gid' not in region_map:
merge_cols = [c for c in region_map.columns if c in rev_table]
if not merge_cols:
msg = ('region map must contain a "sc_gid" column or a '
'column in common with the Supply Curve table.')
logger.error(msg)
raise ReedsValueError(msg)
region_map = pd.merge(rev_table[['sc_gid', ] + merge_cols],
region_map, on=merge_cols)
region_col = [c for c in region_map.columns if c != 'sc_gid']
rev_table['region'] = None
rev_table = rev_table.set_index('sc_gid')
for r, df in region_map.groupby(region_col):
rev_table.loc[df['sc_gid'], 'region'] = r
mask = ~rev_table['region'].isnull()
rev_table = rev_table.loc[mask].reset_index()
return rev_table
@staticmethod
def _TRG_bins(rev_table, trg_bins, by_region=False):
"""
        Create TRG (technical resource groups) using given cumulative
capacity bin widths
Parameters
----------
rev_table : pandas.DataFrame
reV supply curve or aggregation table
trg_bins : pandas.Series
            Cumulative capacity bin widths to create TRGs from
(in GW)
by_region : bool
Groupby on region
Returns
-------
rev_table : pandas.DataFrame
Updated table with TRG classes added
"""
cap_breaks = np.cumsum(trg_bins['TRG_cap'].values) * 1000 # GW to MW
cap_breaks = np.concatenate(([0., ], cap_breaks),
axis=0)
labels = trg_bins.index.values
cols = ['sc_gid', 'capacity', 'mean_lcoe', 'region']
trg_classes = rev_table[cols].copy()
if by_region:
classes = []
trg_classes['class'] = 1
for _, df in trg_classes.groupby('region'):
df = df.sort_values('mean_lcoe')
cum_cap = df['capacity'].cumsum()
df['class'] = pd.cut(x=cum_cap, bins=cap_breaks, labels=labels)
classes.append(df)
trg_classes = pd.concat(classes)
else:
trg_classes = trg_classes.sort_values('mean_lcoe')
cum_cap = trg_classes['capacity'].cumsum()
trg_classes.loc[:, 'class'] = pd.cut(x=cum_cap, bins=cap_breaks,
labels=labels)
rev_table = rev_table.merge(trg_classes[['sc_gid', 'class']],
on='sc_gid', how='left')
return rev_table
@staticmethod
def _TRG_classes(rev_table, trg_bins, by_region=False):
"""
        Create TRG (technical resource groups) using given cumulative
capacity bin widths
Parameters
----------
rev_table : pandas.DataFrame
reV supply curve or aggregation table
trg_bins : pandas.Series
            Cumulative capacity bin widths to create TRGs from
(in GW)
by_region : bool
Groupby on region
Returns
-------
rev_table : pandas.DataFrame
Updated table with TRG classes added
Raises
------
ValueError
If categorical columns do not exist in rev_table
"""
cat_cols = [c for c in trg_bins if c != 'TRG_cap']
if cat_cols:
missing = [c for c in cat_cols if c not in rev_table]
if missing:
msg = ("categorical column(s) supplied with 'TRG_cap' "
"are not valid columns of 'rev_table': {}"
.format(missing))
logger.error(msg)
raise ValueError(msg)
else:
msg = ("Additional columns were supplied with "
"'TRG_cap'! \n TRG bins will be computed for all "
"unique combinations of {}".format(cat_cols))
logger.warning(msg)
warn(msg)
tables = []
rev_groups = rev_table.groupby(cat_cols)
for grp, bins in trg_bins.groupby(cat_cols):
group_table = rev_groups.get_group(grp)
tables.append(ReedsClassifier._TRG_bins(group_table, bins,
by_region=by_region))
rev_table = pd.concat(tables).reset_index(drop=True)
else:
rev_table = ReedsClassifier._TRG_bins(rev_table, trg_bins,
by_region=by_region)
return rev_table
@staticmethod
def _bin_classes(rev_table, class_bins):
"""
Bin classes based on categorical or range bins
Parameters
----------
rev_table : pandas.DataFrame
reV supply curve or aggregation table
class_bins : pandas.DataFrame
Class bins to use:
- categorical: single value
- range: *_min and *_max pair of values -> (min, max]
Returns
-------
rev_table : pandas.DataFrame
Updated table with TRG classes added
Raises
------
ValueError
If range min and max are not supplied for range bins
"""
range_cols = [c for c in class_bins if c.endswith(('min', 'max'))]
if len(range_cols) % 2 != 0:
msg = ("A '*_min' and a '*_max' value are neede for range bins! "
"Values provided: {}".format(range_cols))
logger.error(msg)
raise ValueError(msg)
        # strip the '_min'/'_max' suffix (str.rstrip removes characters, not suffixes)
        rev_cols = [c[:-len('_min')] for c in range_cols]
rev_cols = list(set(rev_cols))
for col in rev_cols:
cols = ['{}_min'.format(col), '{}_max'.format(col)]
class_bins[col] = list(class_bins[cols].values)
class_bins = class_bins.drop(columns=range_cols)
missing = [c for c in class_bins if c not in rev_table]
if missing:
msg = "Bin columns {} are not in 'rev_table'!".format(missing)
logger.error(msg)
raise ValueError(msg)
rev_table['class'] = None
for label, bins in class_bins.iterrows():
mask = True
for col, value in bins.iteritems():
if isinstance(value, (list, np.ndarray)):
bin_mask = ((rev_table[col] > value[0])
& (rev_table[col] <= value[1]))
else:
bin_mask = rev_table[col] == value
mask *= bin_mask
rev_table.loc[mask, 'class'] = label
return rev_table
@classmethod
def _resource_classes(cls, rev_table, resource_classes,
trg_by_region=False):
"""
Create resource classes
Parameters
----------
rev_table : pandas.DataFrame
reV supply curve or aggregation table
resource_classes : str | pandas.DataFrame
Resource classes, either provided in a .csv, .json
as a DataFrame or Series, or in a dictionary
trg_by_region : bool
Groupby on region for TRGs
Returns
-------
rev_table : pandas.DataFrame
Updated table with resource classes added
"""
resource_classes = cls._parse_table(resource_classes)
if 'class' in resource_classes:
resource_classes = resource_classes.set_index('class')
if 'TRG_cap' in resource_classes:
rev_table = cls._TRG_classes(rev_table, resource_classes,
by_region=trg_by_region)
else:
rev_table = cls._bin_classes(rev_table, resource_classes)
return rev_table
@staticmethod
def _capacity_bins(rev_table, cap_bins, sort_bins_by='trans_cap_cost'):
"""
Create equal capacity bins in each region-class sorted by given
column(s)
Parameters
----------
rev_table : pandas.DataFrame
reV supply curve or aggregation table
cap_bins : int
Number of equal capacity bins to create for each
region-class
sort_bins_by : str | list, optional
Column(s) to sort by before capacity binning,
by default 'trans_cap_cost'
Returns
-------
rev_table : pandas.DataFrame
Updated table with classes
"""
if not isinstance(sort_bins_by, list):
sort_bins_by = [sort_bins_by]
cols = ['sc_gid', 'capacity', 'region', 'class'] + sort_bins_by
capacity_bins = rev_table[cols].copy()
bins = []
capacity_bins['bin'] = 1
labels = list(range(1, cap_bins + 1))
for g, df in capacity_bins.groupby(['region', 'class']):
df = df.sort_values(sort_bins_by)
cum_cap = df['capacity'].cumsum()
bin_labels = | pd.cut(x=cum_cap, bins=cap_bins, labels=labels) | pandas.cut |
# date: 2021-11-25
"""Train the model
Usage: train_model.py --train_file=<train_file> --test_file=<test_file> --out_file_train=<out_file_train> --out_file_result=<out_file_result>
Options:
--train_file=<train_file> the train dataframe to train
--test_file=<test_file> the test dataframe to evaluate
--out_file_train=<out_file_train> Path (including filename) of where to locally write the model
--out_file_result=<out_file_result> Path (including filename) of where to locally write the results
"""
from docopt import docopt
from tool.tool_function import *
import os
import shap
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from boruta import BorutaPy
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.model_selection import cross_validate
from sklearn.feature_selection import RFECV
from sklearn.svm import SVC
from sklearn.model_selection import RandomizedSearchCV
from catboost import CatBoostClassifier
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
)
def feature_selection(X_train, y_train):
pipe_rfe_ridgecv = RFECV(Ridge(), cv=2)
pipe_rfe_ridgecv.fit(X_train, y_train)
forest = RandomForestClassifier(n_jobs=-1, class_weight='balanced', max_depth=5)
feat_selector = BorutaPy(forest, n_estimators='auto', verbose=2, random_state=1, perc=90)
feat_selector.fit(X_train.values, y_train)
feature_ranks = pd.DataFrame([X_train.columns, feat_selector.ranking_, feat_selector.support_]).T
feature_ranks.columns = ["feature", "rank", "keep"]
feature_ranks["keep_ridge"] = feature_ranks.feature.apply(lambda x: True if x in pipe_rfe_ridgecv.get_feature_names_out() else False)
feature_ranks = feature_ranks.sort_values("rank")
feature_ranks
features_1 = feature_ranks["feature"].to_list()
features_2 = feature_ranks[feature_ranks["keep"] == True]["feature"].to_list()
features_3 = feature_ranks[(feature_ranks["keep"] == True) | (feature_ranks.head(300)["keep_ridge"] == True)]["feature"].to_list()
return features_1, features_2, features_3
def model_selection(models, feature_lists, X_train, y_train, out_file_result):
rst_all = []
for features_list in feature_lists:
rst_df=[]
for model in models:
tmp_df=pd.DataFrame(mean_std_cross_val_scores(models[model], X_train[features_list], y_train,
return_train_score=True, cv=5), columns=[model+"_"+str(len(features_list))])
rst_df.append(tmp_df)
rst_df=pd.concat(rst_df, axis=1)
rst_all.append(rst_df)
rst_all = pd.concat(rst_all, axis=1).T
col_name = ["CatBoost", "LR", "RF", "SVM"]
rst_all_test = rst_all[["test_score"]].copy()
rst_all_test.loc[:, "model"] = [x.split("_")[0] for x in rst_all_test.index]
rst_all_test.loc[:, "var_num"] = [x.split("_")[1] for x in rst_all_test.index]
rst_all_test.loc[:, "test_score"] = [float(x.split("(+")[0].strip()) for x in rst_all_test.test_score]
rst_all_test = pd.crosstab(columns=rst_all_test["model"], index=rst_all_test["var_num"], values=rst_all_test["test_score"], aggfunc=sum)
rst_all_test.columns = col_name
rst_all_train = rst_all[["train_score"]].copy()
rst_all_train.loc[:, "model"] = [x.split("_")[0] for x in rst_all_train.index]
rst_all_train.loc[:, "var_num"] = [x.split("_")[1] for x in rst_all_train.index]
rst_all_train.loc[:, "test_score"] = [float(x.split("(+")[0].strip()) for x in rst_all_train.train_score]
rst_all_train = | pd.crosstab(columns=rst_all_train["model"], index=rst_all_train["var_num"], values=rst_all_train["test_score"], aggfunc=sum) | pandas.crosstab |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = | pd.read_csv(data_path) | pandas.read_csv |
import numpy as np
from scipy.io import loadmat
import os
from pathlib import Path
# from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
# plotting parameters
sns.set(font_scale=1.1)
sns.set_context("talk")
sns.set_palette(['#701f57', '#ad1759', '#e13342', '#f37651'])
transparent = False
markers = ['o','^','s']
plot_types = ['box','point']
estimator = np.median
est ='median'
# Number of microphones fixed, vary number of loudspeakers
path = Path(__file__).parent / os.path.join('..','matlab','data') # path to the saved results from matlab
outpath = os.path.join(Path(__file__).parent,'figures')
if not os.path.exists(outpath):
os.makedirs(outpath)
Ks = range(6,12) # number of loudspeakers
M = 12 # number of microphones
runs = 200
thresh = 10**-3 # lower bound for error
algs = ['SDR + LM', 'Wang']
suffs = ['real_data_clean_sdr_lm', 'wang_real_data_clean']
res_data = | pd.DataFrame() | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper for ResampledData to behave similar to pandas Resampler.
"""
from abc import ABCMeta
from distutils.version import LooseVersion
from functools import partial
from typing import (
Any,
Generic,
List,
Optional,
)
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset
if LooseVersion(pd.__version__) >= LooseVersion("1.3.0"):
from pandas.core.common import _builtin_table # type: ignore[attr-defined]
else:
from pandas.core.base import SelectionMixin
_builtin_table = SelectionMixin._builtin_table # type: ignore[attr-defined]
from pyspark import SparkContext
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
NumericType,
StructField,
TimestampType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import FrameLike
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
SPARK_DEFAULT_INDEX_NAME,
)
from pyspark.pandas.missing.resample import (
MissingPandasLikeDataFrameResampler,
MissingPandasLikeSeriesResampler,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.utils import (
scol_for,
verify_temp_column_name,
)
class Resampler(Generic[FrameLike], metaclass=ABCMeta):
"""
Class for resampling datetimelike data, a groupby-like operation.
It's easiest to use obj.resample(...) to use Resampler.
Parameters
----------
psdf : DataFrame
Returns
-------
a Resampler of the appropriate type
Notes
-----
After resampling, see aggregate, apply, and transform functions.
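
    Examples
    --------
    A minimal sketch (assumes a pandas-on-Spark DataFrame ``psdf`` with a
    timestamp index; shown for orientation only):

    >>> psdf.resample('3D').sum()  # doctest: +SKIP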
"""
def __init__(
self,
psdf: DataFrame,
resamplekey: Optional[Series],
rule: str,
closed: Optional[str] = None,
label: Optional[str] = None,
agg_columns: List[Series] = [],
):
self._psdf = psdf
self._resamplekey = resamplekey
self._offset = to_offset(rule)
if self._offset.rule_code not in ["A-DEC", "M", "D", "H", "T", "S"]:
raise ValueError("rule code {} is not supported".format(self._offset.rule_code))
if not self._offset.n > 0: # type: ignore[attr-defined]
raise ValueError("rule offset must be positive")
if closed is None:
self._closed = "right" if self._offset.rule_code in ["A-DEC", "M"] else "left"
elif closed in ["left", "right"]:
self._closed = closed
else:
raise ValueError("invalid closed: '{}'".format(closed))
if label is None:
self._label = "right" if self._offset.rule_code in ["A-DEC", "M"] else "left"
elif label in ["left", "right"]:
self._label = label
else:
raise ValueError("invalid label: '{}'".format(label))
self._agg_columns = agg_columns
@property
def _resamplekey_scol(self) -> Column:
if self._resamplekey is None:
return self._psdf.index.spark.column
else:
return self._resamplekey.spark.column
@property
def _agg_columns_scols(self) -> List[Column]:
return [s.spark.column for s in self._agg_columns]
def _bin_time_stamp(self, origin: pd.Timestamp, ts_scol: Column) -> Column:
sql_utils = SparkContext._active_spark_context._jvm.PythonSQLUtils
origin_scol = F.lit(origin)
(rule_code, n) = (self._offset.rule_code, self._offset.n) # type: ignore[attr-defined]
left_closed, right_closed = (self._closed == "left", self._closed == "right")
left_labeled, right_labeled = (self._label == "left", self._label == "right")
if rule_code == "A-DEC":
assert (
origin.month == 12
and origin.day == 31
and origin.hour == 0
and origin.minute == 0
and origin.second == 0
)
diff = F.year(ts_scol) - F.year(origin_scol)
mod = F.lit(0) if n == 1 else (diff % n)
edge_cond = (mod == 0) & (F.month(ts_scol) == 12) & (F.dayofmonth(ts_scol) == 31)
edge_label = F.year(ts_scol)
if left_closed and right_labeled:
edge_label += n
elif right_closed and left_labeled:
edge_label -= n
if left_labeled:
non_edge_label = F.when(mod == 0, F.year(ts_scol) - n).otherwise(
F.year(ts_scol) - mod
)
else:
non_edge_label = F.when(mod == 0, F.year(ts_scol)).otherwise(
F.year(ts_scol) - (mod - n)
)
return F.to_timestamp(
F.make_date(
F.when(edge_cond, edge_label).otherwise(non_edge_label), F.lit(12), F.lit(31)
)
)
elif rule_code == "M":
assert (
origin.is_month_end
and origin.hour == 0
and origin.minute == 0
and origin.second == 0
)
diff = (
(F.year(ts_scol) - F.year(origin_scol)) * 12
+ F.month(ts_scol)
- F.month(origin_scol)
)
mod = F.lit(0) if n == 1 else (diff % n)
edge_cond = (mod == 0) & (F.dayofmonth(ts_scol) == F.dayofmonth(F.last_day(ts_scol)))
truncated_ts_scol = F.date_trunc("MONTH", ts_scol)
edge_label = truncated_ts_scol
if left_closed and right_labeled:
edge_label += sql_utils.makeInterval("MONTH", F.lit(n)._jc)
elif right_closed and left_labeled:
edge_label -= sql_utils.makeInterval("MONTH", F.lit(n)._jc)
if left_labeled:
non_edge_label = F.when(
mod == 0,
truncated_ts_scol - sql_utils.makeInterval("MONTH", F.lit(n)._jc),
).otherwise(truncated_ts_scol - sql_utils.makeInterval("MONTH", mod._jc))
else:
non_edge_label = F.when(mod == 0, truncated_ts_scol).otherwise(
truncated_ts_scol - sql_utils.makeInterval("MONTH", (mod - n)._jc)
)
return F.to_timestamp(
F.last_day(F.when(edge_cond, edge_label).otherwise(non_edge_label))
)
elif rule_code == "D":
assert origin.hour == 0 and origin.minute == 0 and origin.second == 0
if n == 1:
# NOTE: the logic to process '1D' is different from the cases with n>1,
# since hour/minute/second parts are taken into account to determine edges!
edge_cond = (
(F.hour(ts_scol) == 0) & (F.minute(ts_scol) == 0) & (F.second(ts_scol) == 0)
)
if left_closed and left_labeled:
return F.date_trunc("DAY", ts_scol)
elif left_closed and right_labeled:
return F.date_trunc("DAY", F.date_add(ts_scol, 1))
elif right_closed and left_labeled:
return F.when(edge_cond, F.date_trunc("DAY", F.date_sub(ts_scol, 1))).otherwise(
F.date_trunc("DAY", ts_scol)
)
else:
return F.when(edge_cond, F.date_trunc("DAY", ts_scol)).otherwise(
F.date_trunc("DAY", F.date_add(ts_scol, 1))
)
else:
diff = F.datediff(end=ts_scol, start=origin_scol)
mod = diff % n
edge_cond = mod == 0
truncated_ts_scol = F.date_trunc("DAY", ts_scol)
edge_label = truncated_ts_scol
if left_closed and right_labeled:
edge_label = F.date_add(truncated_ts_scol, n)
elif right_closed and left_labeled:
edge_label = F.date_sub(truncated_ts_scol, n)
if left_labeled:
non_edge_label = F.date_sub(truncated_ts_scol, mod)
else:
non_edge_label = F.date_sub(truncated_ts_scol, mod - n)
return F.when(edge_cond, edge_label).otherwise(non_edge_label)
elif rule_code in ["H", "T", "S"]:
unit_mapping = {"H": "HOUR", "T": "MINUTE", "S": "SECOND"}
unit_str = unit_mapping[rule_code]
truncated_ts_scol = F.date_trunc(unit_str, ts_scol)
diff = sql_utils.timestampDiff(unit_str, origin_scol._jc, truncated_ts_scol._jc)
mod = F.lit(0) if n == 1 else (diff % F.lit(n))
if rule_code == "H":
assert origin.minute == 0 and origin.second == 0
edge_cond = (mod == 0) & (F.minute(ts_scol) == 0) & (F.second(ts_scol) == 0)
elif rule_code == "T":
assert origin.second == 0
edge_cond = (mod == 0) & (F.second(ts_scol) == 0)
else:
edge_cond = mod == 0
edge_label = truncated_ts_scol
if left_closed and right_labeled:
edge_label += sql_utils.makeInterval(unit_str, F.lit(n)._jc)
elif right_closed and left_labeled:
edge_label -= sql_utils.makeInterval(unit_str, F.lit(n)._jc)
if left_labeled:
non_edge_label = F.when(mod == 0, truncated_ts_scol).otherwise(
truncated_ts_scol - sql_utils.makeInterval(unit_str, mod._jc)
)
else:
non_edge_label = F.when(
mod == 0,
truncated_ts_scol + sql_utils.makeInterval(unit_str, F.lit(n)._jc),
).otherwise(truncated_ts_scol - sql_utils.makeInterval(unit_str, (mod - n)._jc))
return F.when(edge_cond, edge_label).otherwise(non_edge_label)
else:
raise ValueError("Got the unexpected unit {}".format(rule_code))
def _downsample(self, f: str) -> DataFrame:
"""
Downsample the defined function.
Parameters
----------
how : string / mapped function
**kwargs : kw args passed to how function
"""
# a simple example to illustrate the computation:
# dates = [
# datetime.datetime(2012, 1, 2),
# datetime.datetime(2012, 5, 3),
# datetime.datetime(2022, 5, 3),
# ]
# index = pd.DatetimeIndex(dates)
# pdf = pd.DataFrame(np.array([1,2,3]), index=index, columns=['A'])
# pdf.resample('3Y').max()
# A
# 2012-12-31 2.0
# 2015-12-31 NaN
# 2018-12-31 NaN
# 2021-12-31 NaN
# 2024-12-31 3.0
#
# in this case:
# 1, obtain one origin point to bin all timestamps, we can get one (2009-12-31)
# from the minimum timestamp (2012-01-02);
# 2, the default intervals for 'Y' are right-closed, so intervals are:
# (2009-12-31, 2012-12-31], (2012-12-31, 2015-12-31], (2015-12-31, 2018-12-31], ...
# 3, bin all timestamps, for example, 2022-05-03 belongs to interval
# (2021-12-31, 2024-12-31], since the default label is 'right', label it with the right
# edge 2024-12-31;
# 4, some intervals maybe too large for this down sampling, so we need to pad the dataframe
# to avoid missing some results, like: 2015-12-31, 2018-12-31 and 2021-12-31;
# 5, union the binned dataframe and padded dataframe, and apply aggregation 'max' to get
# the final results;
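        #
        # A rough pure-pandas sketch of steps 4-5, kept as a comment (names here
        # are illustrative and not used below):
        #   binned = pdf.groupby(bin_labels)['A'].max()
        #   all_bins = pd.date_range('2012-12-31', '2024-12-31', freq='3A-DEC')
        #   binned.reindex(all_bins)   # pads 2015/2018/2021 with NaN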
# one action to obtain the range, in the future we may cache it in the index.
ts_min, ts_max = (
self._psdf._internal.spark_frame.select(
F.min(self._resamplekey_scol), F.max(self._resamplekey_scol)
)
.toPandas()
.iloc[0]
)
# the logic to obtain an origin point to bin the timestamps is too complex to follow,
# here just use Pandas' resample on a 1-length series to get it.
ts_origin = (
pd.Series([0], index=[ts_min])
.resample(rule=self._offset.freqstr, closed=self._closed, label="left")
.sum()
.index[0]
)
assert ts_origin <= ts_min
bin_col_name = "__tmp_resample_bin_col__"
bin_col_label = verify_temp_column_name(self._psdf, bin_col_name)
bin_col_field = InternalField(
dtype=np.dtype("datetime64[ns]"),
struct_field=StructField(bin_col_name, TimestampType(), True),
)
bin_scol = self._bin_time_stamp(
ts_origin,
self._resamplekey_scol,
)
agg_columns = [
psser for psser in self._agg_columns if (isinstance(psser.spark.data_type, NumericType))
]
assert len(agg_columns) > 0
# in the binning side, label the timestamps according to the origin and the freq(rule)
bin_sdf = self._psdf._internal.spark_frame.select(
F.col(SPARK_DEFAULT_INDEX_NAME),
bin_scol.alias(bin_col_name),
*[psser.spark.column for psser in agg_columns],
)
# in the padding side, insert necessary points
# again, directly apply Pandas' resample on a 2-length series to obtain the indices
pad_sdf = (
ps.from_pandas(
| pd.Series([0, 0], index=[ts_min, ts_max]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Process an EPW file
.. moduleauthor:: <NAME> (<EMAIL>, <EMAIL>)
"""
import csv
from collections import OrderedDict
import pandas as pd
from datetime import datetime
class EpwFile:
def __init__(self, filepath):
"""
Load an EPW file into memory
:param filepath: String, Path to the file
"""
self.data = []
self.columns = [
{
'name': 'year',
'long_name': 'year',
'type': 'int',
'units': '',
}, {
'name': 'month',
'long_name': 'month',
'type': 'int',
'units': '',
}, {
'name': 'day',
'long_name': 'day',
'type': 'int',
'units': '',
}, {
'name': 'hour',
'long_name': 'hour',
'type': 'int',
'units': '',
}, {
'name': 'minute',
'long_name': 'minute',
'type': 'int',
'units': '',
}, {
'name': 'data_source',
'long_name': 'data_source',
'type': 'str',
'units': '',
}, {
'name': 'dry_bulb',
'long_name': 'dry_bulb',
'type': 'float',
'units': 'deg C',
}, {
'name': 'dew_point',
'long_name': 'dew_point',
'type': 'float',
'units': 'deg C',
}, {
'name': 'rh',
'long_name': 'rh',
'type': 'float',
'units': 'percent',
}
]
self.column_names = [c['name'] for c in self.columns]
self.start_day_of_week = '0' # Sunday
# '', '', 'atmos_pressure', 'ext_horz_rad', 'ext_dir_rad',
# 'horz_ir_sky', 'glo_horz_rad', 'dir_norm_rad', 'dif_horz_rad',
# 'glo_horz_illum', 'dir_norm_illum', 'dif_horz_illum', 'zen_lum', 'wind_dir',
# 'wind_spd', 'tot_sky_cvr', 'opaq_sky_cvr', 'visibility', 'ceiling_hgt',
# 'pres_weath_obs', 'pres_weath_codes', 'precip_wtr', 'aerosol_opt_depth',
# 'snow_depth', ' days_since_last_snow', 'albedo', 'rain', 'rain_quantity']
# Date,HH:MM,Datasource,DryBulb {C},DewPoint {C},RelHum {%},Atmos Pressure {Pa},ExtHorzRad {Wh/m2},ExtDirRad {Wh/m2},HorzIRSky {Wh/m2},GloHorzRad {Wh/m2},DirNormRad {Wh/m2},DifHorzRad {Wh/m2},GloHorzIllum {lux},DirNormIllum {lux},DifHorzIllum {lux},ZenLum {Cd/m2},WindDir {deg},WindSpd {m/s},TotSkyCvr {.1},OpaqSkyCvr {.1},Visibility {km},Ceiling Hgt {m},PresWeathObs,PresWeathCodes,Precip Wtr {mm},Aerosol Opt Depth {.001},SnowDepth {cm},Days Last Snow,Albedo {.01},Rain {mm},Rain Quantity {hr}
with open(filepath, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] == 'LOCATION':
pass
# print('Parsing Location')
elif row[0] == 'DESIGN CONDITIONS':
pass
# print('Parsing Design Conditions')
elif row[0] == 'TYPICAL/EXTREME PERIODS':
pass
# print('Parsing Typical / Extreme Periods')
elif row[0] == 'GROUND TEMPERATURES':
pass
# print('Parsing Ground Temperatures')
elif row[0] == 'HOLIDAYS/DAYLIGHT SAVINGS':
pass
# print('Parsing Holidays / Daylight Savings')
elif row[0] == 'COMMENTS 1':
pass
# print('Parsing Comments 1')
elif row[0] == 'COMMENTS 2':
pass
# print('Parsing Comments 2')
elif row[0] == 'DATA PERIODS':
pass
# print('Parsing Data Periods')
else:
self._append_row(row)
self.post_process_data()
def _append_row(self, row):
data_types = [c['type'] for c in self.columns]
row = [eval("%s(\'%s\')" % cast) for cast in zip(data_types, row)]
self.data.append(OrderedDict(zip(self.column_names, row)))
def post_process_data(self):
"""
Add in derived columns
:return:
"""
for index, datum in enumerate(self.data):
dt = "%s/%s/2017 %s:00" % (datum['month'], datum['day'], datum['hour'] - 1)
self.data[index]['datetime'] = dt
# convert to dt
dt_obj = datetime.strptime(dt, '%m/%d/%Y %H:%M')
# Add in the DayOfYear for convenience.
self.data[index]['DayOfYear'] = dt_obj.strftime('%j')
# add in the day of the week
self.data[index]['dayofweek'] = dt_obj.strftime('%A')
self.data[index]['dayofweek_int'] = dt_obj.strftime('%w') # 0 = sunday, 1 = monday, ...
def as_dataframe(self):
"""
Return the EPW file as a dataframe. This drops the data_source column for brevity.
:return: pandas DataFrame
"""
return | pd.DataFrame(self.data) | pandas.DataFrame |
#!/usr/bin/env python
import argparse
import pandas as pd
import re
#read arguments
parser = argparse.ArgumentParser(description="Subset the exon clusters by species pairs based on the pairwise reclustered gene orthogroups")
parser.add_argument("--exon_pairs", "-ep", required=True)
parser.add_argument("--reclustered_genes", "-rg", required=True)
parser.add_argument("--exon_clusters", "-ec", required=True)
parser.add_argument("--species1", "-s1", required=True)
parser.add_argument("--species2", "-s2", required=True)
parser.add_argument("--output_file", "-out", required=True)
args = parser.parse_args()
my_exon_pairs = args.exon_pairs
my_reclustered_genes = args.reclustered_genes
my_exon_clusters = args.exon_clusters
species1 = args.species1
species2 = args.species2
my_output = args.output_file
###### Main
#read input
reclustered_genes_df = pd.read_table(my_reclustered_genes, sep="\t", header=None, names=["ClusterID", "Species", "GeneID"])
exon_pairs_df = pd.read_table(my_exon_pairs, sep="\t", header=None, names=["GeneID1", "ExonID1", "GeneID2", "ExonID2", "Species1", "Species2"])
exon_clusters_df = pd.read_table(my_exon_clusters, sep="\t", header=0) #header=[ExCluster_ID, GeneID, Coordinate, Species, Membership_score]
#filter only for species pair of interest
exon_pairs_sub_df = exon_pairs_df.loc[exon_pairs_df.Species1.isin([species1, species2])]
exon_pairs_sub_df = exon_pairs_sub_df.loc[exon_pairs_sub_df.Species2.isin([species1, species2])]
#Get geneID-reclusteredID dictionary.
geneID_reclusteredID_dict = pd.Series(reclustered_genes_df.ClusterID.values, index=reclustered_genes_df.GeneID).to_dict()
#translate geneID with relative reclusteredID.
exon_pairs_sub_df["ClusterID1"] = exon_pairs_sub_df["GeneID1"].map(geneID_reclusteredID_dict)
exon_pairs_sub_df["ClusterID2"] = exon_pairs_sub_df["GeneID2"].map(geneID_reclusteredID_dict)
#filter only for exon hits within genes in the gene subcluster.
filtered_pairs_df = exon_pairs_sub_df.loc[exon_pairs_sub_df.ClusterID1 == exon_pairs_sub_df.ClusterID2]
#get a ExonID-ExClusterID dictionary.
exonID_exclusterID_dict = | pd.Series(exon_clusters_df.ExCluster_ID.values, index=exon_clusters_df.Coordinate) | pandas.Series |
from qutip import *
from ..mf import *
import pandas as pd
from scipy.interpolate import interp1d
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
def ham_gen_jc(params, alpha=0):
sz = tensor(sigmaz(), qeye(params.c_levels))
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
ham = (params.fc-params.fd)*a.dag()*a
ham += params.eps*(a+a.dag())
ham += 0.5*(params.f01-params.fd)*sz
ham += params.g*(a*sm.dag() + a.dag()*sm)
ham *= 2*np.pi
return ham
def c_ops_gen_jc(params, alpha=0):
c_ops = []
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
if params.gamma > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*(1+params.n_t))*sm)
if params.n_t > 0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*params.n_t)*sm.dag())
if params.gamma_phi > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma_phi)*sm.dag()*sm)
if params.kappa > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*(1+params.n_c))*a)
if params.n_c > 0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*params.n_c)*a.dag())
return c_ops
def iterative_alpha_calc(params, n_cycles=10, initial_alpha=0):
alpha = initial_alpha
try:
for idx in range(n_cycles):
ham = ham_gen_jc(params, alpha=alpha)
c_ops = c_ops_gen_jc(params, alpha=alpha)
rho = steadystate(ham, c_ops)
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
a_exp = expect(a, rho)
alpha = a_exp
except:
alpha = None
return alpha
class Spectrum:
def __init__(self, parameters):
print('hello')
self.parameters = deepcopy(parameters)
self.mf_amplitude = None
self.me_amplitude = None
self.transmission_exp = None
self.hilbert_params = None
def iterative_calculate(self, fd_array, initial_alpha=0, n_cycles=10, prune=True):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
params = deepcopy(self.parameters)
fd_array = np.sort(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
        if change == 'hard':
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
else:
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
fd_array = np.flip(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
        if change == 'hard':
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
else:
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
if prune:
alpha_dim_iterative = alpha_dim_iterative.dropna()
alpha_bright_iterative = alpha_bright_iterative.dropna()
alpha_dim_iterative.sort_index(inplace=True)
alpha_bright_iterative.sort_index(inplace=True)
            if change == 'hard':
# alpha_dim_diff = np.diff(alpha_dim_iterative)/np.diff(alpha_dim_iterative.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
first_dim_idx = np.argmax(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[first_dim_idx:]
# alpha_bright_diff = np.diff(alpha_bright_iterative) / np.diff(alpha_bright_iterative.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
last_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[:last_bright_idx + 1]
else:
first_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[first_bright_idx:]
last_dim_idx = np.argmin(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[:last_dim_idx+1]
self.iterative_amplitude = pd.concat([alpha_dim_iterative, alpha_bright_iterative], axis=1)
def gen_raw_hilbert_params(self, fd_array, c_levels):
self.hilbert_params = pd.DataFrame(np.zeros([fd_array.shape[0], 1]), index=fd_array, columns=['alpha_0'])
self.hilbert_params['c_levels'] = c_levels
def gen_iterative_hilbert_params(self, fd_limits, kind='linear', fill_value='extrapolate', fraction=0.5,
level_scaling=1.0, max_shift=False, max_levels=True, relative='dim', relative_crossover=None, c_levels_bistable=None):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
alpha_dim = self.iterative_amplitude['alpha_dim'].dropna()
# alpha_dim.sort_index(inplace=True)
# alpha_dim_diff = np.diff(alpha_dim)/np.diff(alpha_dim.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
# alpha_dim = alpha_dim.iloc[first_dim_idx:]
alpha_bright = self.iterative_amplitude['alpha_bright'].dropna()
# alpha_bright.sort_index(inplace=True)
# alpha_bright_diff = np.diff(alpha_bright) / np.diff(alpha_bright.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
# alpha_bright = alpha_bright.iloc[:last_bright_idx]
new_iterative_alphas = pd.concat([alpha_dim, alpha_bright], axis=1)
self.iterative_amplitude = new_iterative_alphas
alpha_dim_real_func = interp1d(alpha_dim.index, alpha_dim.real, kind=kind, fill_value=fill_value)
alpha_dim_imag_func = interp1d(alpha_dim.index, alpha_dim.imag, kind=kind, fill_value=fill_value)
def alpha_dim_func_single(fd):
alpha_dim = alpha_dim_real_func(fd) + 1j * alpha_dim_imag_func(fd)
return alpha_dim
alpha_dim_func_vec = np.vectorize(alpha_dim_func_single)
def alpha_dim_func(fd_array):
alpha_dim_array = alpha_dim_func_vec(fd_array)
alpha_dim_series = pd.Series(alpha_dim_array, index=fd_array, name='alpha_dim_func')
return alpha_dim_series
alpha_bright_real_func = interp1d(alpha_bright.index, alpha_bright.real, kind=kind,
fill_value=fill_value)
alpha_bright_imag_func = interp1d(alpha_bright.index, alpha_bright.imag, kind=kind,
fill_value=fill_value)
def alpha_bright_func_single(fd):
alpha_bright = alpha_bright_real_func(fd) + 1j * alpha_bright_imag_func(fd)
return alpha_bright
alpha_bright_func_vec = np.vectorize(alpha_bright_func_single)
def alpha_bright_func(fd_array):
alpha_bright_array = alpha_bright_func_vec(fd_array)
alpha_bright_series = pd.Series(alpha_bright_array, index=fd_array, name='alpha_bright')
return alpha_bright_series
alpha_dim_interp = alpha_dim_func(self.iterative_amplitude.index)
alpha_bright_interp = alpha_bright_func(self.iterative_amplitude.index)
alpha_diff_interp = (alpha_bright_interp - alpha_dim_interp).dropna()
if max_shift:
min_diff = np.min(np.abs(alpha_diff_interp))
alpha_diff_unit_interp = alpha_diff_interp / np.abs(alpha_diff_interp)
            if relative == 'dim':
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
            elif relative == 'bright':
alpha_0_interp = alpha_bright_interp - fraction * min_diff * alpha_diff_unit_interp
            elif relative == 'both':
                if change == 'soft':
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
alpha_0_interp[relative_crossover:] = alpha_bright_interp[relative_crossover:] - fraction * min_diff * alpha_diff_unit_interp[relative_crossover:]
else:
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
alpha_0_interp[:relative_crossover] = alpha_bright_interp[:relative_crossover] - fraction * min_diff * alpha_diff_unit_interp[:relative_crossover]
else:
raise Exception('Relative is neither bright, dim nor both.')
else:
            if relative == 'dim':
alpha_0_interp = alpha_dim_interp + fraction * alpha_diff_interp
            elif relative == 'bright':
alpha_0_interp = alpha_bright_interp - fraction * alpha_diff_interp
else:
                raise Exception('Relative is neither bright nor dim.')
alpha_diff_interp.name = 'alpha_diff'
alpha_0_interp.name = 'alpha_0'
hilbert_params = pd.concat([alpha_diff_interp, alpha_0_interp], axis=1)
if max_levels:
if c_levels_bistable is not None:
hilbert_params['c_levels'] = c_levels_bistable
else:
min_diff = np.min(np.abs(alpha_diff_interp))
                hilbert_params['c_levels'] = int(np.ceil(level_scaling * min_diff ** 2))
else:
hilbert_params['c_levels'] = np.ceil(level_scaling * np.abs(alpha_diff_interp.values) ** 2).astype(int)
hilbert_params['c_levels'].loc[:fd_limits[0]] = self.parameters.c_levels
hilbert_params['c_levels'].loc[fd_limits[1]:] = self.parameters.c_levels
        if change == 'hard':
hilbert_params['alpha_0'].loc[:fd_limits[0]] = self.iterative_amplitude['alpha_bright'].loc[:fd_limits[0]]
hilbert_params['alpha_0'].loc[fd_limits[1]:] = self.iterative_amplitude['alpha_dim'].loc[fd_limits[1]:]
else:
hilbert_params['alpha_0'].loc[:fd_limits[0]] = self.iterative_amplitude['alpha_dim'].loc[:fd_limits[0]]
hilbert_params['alpha_0'].loc[fd_limits[1]:] = self.iterative_amplitude['alpha_bright'].loc[fd_limits[1]:]
# hilbert_params = pd.concat([hilbert_params, alpha_dim_interp, alpha_bright_interp], axis=1)
self.alpha_dim_interp = alpha_dim_interp
self.alpha_bright_interp = alpha_bright_interp
self.alpha_diff_interp = alpha_diff_interp
self.hilbert_params = hilbert_params
self.completed = np.zeros(hilbert_params.index.shape[0])
self.attempted = np.zeros(hilbert_params.index.shape[0])
a_array = np.zeros(hilbert_params.index.shape[0], dtype=complex)
self.me_amplitude = pd.DataFrame(a_array, index=hilbert_params.index)
def mf_calculate(self, fd_array, characterise_only=False):
if self.mf_amplitude is None:
self.mf_amplitude = map_mf_jc(self.parameters, fd_array=fd_array, characterise_only=characterise_only)
else:
fd0 = fd_array[0]
fd1 = fd_array[-1]
idx0 = self.mf_amplitude.index.get_loc(fd0, method='nearest')
idx1 = self.mf_amplitude.index.get_loc(fd1, method='nearest')
alpha0_dim = self.mf_amplitude['a_dim'].iloc[idx0]
sm0_dim = self.mf_amplitude['sm_dim'].iloc[idx0]
sz0_dim = self.mf_amplitude['sz_dim'].iloc[idx0]
alpha0_bright = self.mf_amplitude['a_bright'].iloc[idx1]
sm0_bright = self.mf_amplitude['sm_bright'].iloc[idx1]
sz0_bright = self.mf_amplitude['sz_bright'].iloc[idx1]
mf_amplitude_new = mf_characterise_jc(self.parameters, fd_array, alpha0_bright=alpha0_bright,
sm0_bright=sm0_bright, sz0_bright=sz0_bright, alpha0_dim=alpha0_dim,
sm0_dim=sm0_dim, sz0_dim=sz0_dim, check_bistability=False)
self.mf_amplitude = pd.concat([self.mf_amplitude, mf_amplitude_new])
self.mf_amplitude = self.mf_amplitude.sort_index()
self.mf_amplitude = self.mf_amplitude[~self.mf_amplitude.index.duplicated(keep='first')]
def generate_hilbert_params(self, c_levels_bi_scale=1.0, scale=0.5, fd_limits=None, max_shift=True,
c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0, kind='linear',
method='extrapolate_alpha_0'):
print(c_levels_bi)
self.hilbert_params = generate_hilbert_params(self.mf_amplitude, c_levels_bi_scale=c_levels_bi_scale,
scale=scale, fd_limits=fd_limits, kind=kind,
max_shift=max_shift, c_levels_mono=c_levels_mono,
c_levels_bi=c_levels_bi, alpha_0_mono=alpha_0_mono,
alpha_0_bi=alpha_0_bi, method=method)
def me_calculate(self, solver_kwargs={}, c_levels_bi_scale=1.0, scale=0.5, fd_limits=None, fill_value='extrapolate',
max_shift=False, c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0, kind='linear',
method='extrapolate_alpha_0', level_scaling=1.0, max_levels=True, save_name=None, resume_uncompleted=True):
if self.hilbert_params is None:
            if method == 'iterative':
frequencies = self.iterative_amplitude.index
self.gen_iterative_hilbert_params(fd_limits, kind=kind, fill_value=fill_value, fraction=scale,
level_scaling=level_scaling, max_shift=max_shift, max_levels=max_levels)
else:
frequencies = self.mf_amplitude.index
self.generate_hilbert_params(c_levels_bi_scale=c_levels_bi_scale, scale=scale, max_shift=max_shift,
c_levels_mono=c_levels_mono, c_levels_bi=c_levels_bi,
alpha_0_mono=alpha_0_mono,
alpha_0_bi=alpha_0_bi, fd_limits=fd_limits, kind=kind, method=method)
if self.me_amplitude is None:
self.completed = np.zeros(self.hilbert_params.index.shape[0])
self.attempted = np.zeros(self.hilbert_params.index.shape[0])
a_array = np.zeros(self.hilbert_params.index.shape[0], dtype=complex)
self.me_amplitude = pd.DataFrame(a_array, index=self.hilbert_params.index)
frequencies = self.hilbert_params.index
a_array = self.me_amplitude.values[:,0]
params = deepcopy(self.parameters)
for fd_idx, fd, alpha0, c_levels in tqdm(
zip(np.arange(self.hilbert_params.index.shape[0]), self.hilbert_params.index,
self.hilbert_params['alpha_0'], self.hilbert_params['c_levels'])):
if (resume_uncompleted and self.completed[fd_idx] == 0) or (not resume_uncompleted and self.attempted[fd_idx] == 0):
params.fd = fd
params.c_levels = c_levels
ham = ham_gen_jc(params, alpha=alpha0)
c_ops = c_ops_gen_jc(params, alpha=alpha0)
self.attempted[fd_idx] = 1
try:
rho = steadystate(ham, c_ops, **solver_kwargs)
a = tensor(qeye(2), destroy(params.c_levels)) + alpha0
a_array[fd_idx] = expect(rho, a)
self.me_amplitude = pd.DataFrame(a_array, index=frequencies)
if save_name is not None:
qsave(self, save_name)
self.completed[fd_idx] = 1
except:
print('Failure at fd = ' + str(fd))
a_array[fd_idx] = np.nan
def plot(self, axes=None, mf=True, me=True, db=True, me_kwargs={'marker': 'o'}, mf_kwargs={'marker': 'o'}):
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.set_xlabel(r'$f_d$ (GHz)')
axes.set_ylabel(r'|$\langle a \rangle$|')
if db:
if me:
if self.me_amplitude is not None:
axes.plot(self.me_amplitude.dropna().index, 20 * np.log10(np.abs(self.me_amplitude.dropna())),
**me_kwargs)
if mf:
if self.mf_amplitude.shape[1] == 1:
axes.plot(self.mf_amplitude.index, 20 * np.log10(np.abs(self.mf_amplitude['a'])), **mf_kwargs)
else:
axes.plot(self.mf_amplitude.index, 20 * np.log10(np.abs(self.mf_amplitude['a_bright'])),
**mf_kwargs)
axes.plot(self.mf_amplitude.index, 20 * np.log10(np.abs(self.mf_amplitude['a_dim'])), **mf_kwargs)
else:
if me:
if self.me_amplitude is not None:
axes.plot(self.me_amplitude.dropna().index, np.abs(self.me_amplitude.dropna()), **me_kwargs)
if mf:
if self.mf_amplitude.shape[1] == 1:
axes.plot(self.mf_amplitude.index, np.abs(self.mf_amplitude['a']), **mf_kwargs)
else:
axes.plot(self.mf_amplitude.index, np.abs(self.mf_amplitude['a_bright']), **mf_kwargs)
axes.plot(self.mf_amplitude.index, np.abs(self.mf_amplitude['a_dim']), **mf_kwargs)
def plot_transmission(self, axes=None, scale=4.851024710399999e-09, exp=True, sim=True, me_kwargs={'marker': 'o'},
mf_kwargs={'marker': 'o'}):
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.set_ylabel(r'$T_{NA}$ (dB)')
axes.set_xlabel(r'$f_{df}$ (GHz)')
if sim and self.me_amplitude is not None:
self.transmission = scale * np.abs(self.me_amplitude.dropna()) ** 2 / self.parameters.eps ** 2
axes.plot(self.transmission.index, 10 * np.log10(self.transmission), label='Sim', **me_kwargs)
if exp and self.transmission_exp is not None:
axes.plot(self.transmission_exp.index, self.transmission_exp, label='Exp')
def load_exp(self, path):
self.transmission_exp = pd.read_csv(path, dtype=float, header=None).T
self.transmission_exp = self.transmission_exp.set_index(0)
def generate_hilbert_params(mf_amplitude, fd_limits=None, scale=0.5, c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0,
c_levels_bi_scale=1.0, max_shift=True, kind='linear', method='extrapolate_alpha_0'):
if 'a_dim' not in mf_amplitude.columns:
hilbert_params = deepcopy(mf_amplitude)
hilbert_params.columns = ['alpha_0']
hilbert_params['c_levels'] = c_levels_mono
    elif method == 'static':
n_frequencies = mf_amplitude.shape[0]
hilbert_params = pd.DataFrame(alpha_0_mono*np.ones([n_frequencies,1],dtype=complex), columns=['alpha_0'], index=mf_amplitude.index)
hilbert_params['c_levels'] = c_levels_mono
if fd_limits is not None:
hilbert_params['c_levels'][fd_limits[0]:fd_limits[1]] = c_levels_bi
hilbert_params['alpha_0'][fd_limits[0]:fd_limits[1]] = alpha_0_bi
else:
mf_amplitude_bistable = mf_amplitude.dropna()
bistable_frequencies = mf_amplitude_bistable.index
alpha_diff_bistable = mf_amplitude_bistable['a_bright'] - mf_amplitude_bistable['a_dim']
alpha_diff_bistable_min = np.min(np.abs(alpha_diff_bistable))
alpha_dim_bistable = mf_amplitude_bistable['a_dim']
if max_shift:
alpha_diff_bistable_unit = alpha_diff_bistable / np.abs(alpha_diff_bistable)
alpha_0_bistable = alpha_dim_bistable + scale * alpha_diff_bistable_min * alpha_diff_bistable_unit
else:
alpha_0_bistable = alpha_dim_bistable + scale * alpha_diff_bistable
if fd_limits is not None:
if method not in ['extrapolate_alpha_0', 'extrapolate_diff']:
raise Exception('Method not recognised.')
bistable_frequencies = mf_amplitude[fd_limits[0]:fd_limits[1]].index
            if method == 'extrapolate_alpha_0':
alpha_0_bistable_re_func = interp1d(alpha_0_bistable.index, alpha_0_bistable.values.real,
fill_value='extrapolate', kind=kind)
alpha_0_bistable_im_func = interp1d(alpha_0_bistable.index, alpha_0_bistable.values.imag,
fill_value='extrapolate', kind=kind)
def alpha_0_bistable_func_single(fd):
return alpha_0_bistable_re_func(fd) + 1j * alpha_0_bistable_im_func(fd)
alpha_0_bistable_func = np.vectorize(alpha_0_bistable_func_single, otypes=[complex])
alpha_0_bistable = alpha_0_bistable_func(bistable_frequencies)
alpha_0_bistable = pd.Series(alpha_0_bistable, index=bistable_frequencies)
            elif method == 'extrapolate_diff':
diff_re_func = interp1d(alpha_diff_bistable.index, alpha_diff_bistable.values.real,
fill_value='extrapolate', kind=kind)
diff_im_func = interp1d(alpha_diff_bistable.index, alpha_diff_bistable.values.imag,
fill_value='extrapolate', kind=kind)
def diff_func_single(fd):
return diff_re_func(fd) + 1j * diff_im_func(fd)
diff_func = np.vectorize(diff_func_single, otypes=[complex])
upper_mf_bistable_fd = mf_amplitude.dropna().index[-1]
if fd_limits[1] < upper_mf_bistable_fd or fd_limits[0] > upper_mf_bistable_fd:
raise Exception('Frequency range does not cover the upper bistability crossover.')
lower_midpoint_frequencies = mf_amplitude[fd_limits[0]:upper_mf_bistable_fd].index
diff_lower = diff_func(lower_midpoint_frequencies)
diff_lower_unit = diff_lower / np.abs(diff_lower)
alpha_dim_lower = mf_amplitude['a_dim'][lower_midpoint_frequencies]
alpha_0_lower = alpha_dim_lower + scale * alpha_diff_bistable_min * diff_lower_unit
alpha_0_lower = | pd.Series(alpha_0_lower, index=lower_midpoint_frequencies) | pandas.Series |
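# Illustrative aside, a minimal sketch with toy numbers (not simulation output): the Spectrum
# class above repeatedly wraps complex amplitude arrays in pd.Series keyed by drive frequency
# and then aligns the dim/bright branches with pd.concat(axis=1).
import numpy as np
import pandas as pd

_fd = np.linspace(10.45, 10.55, 5)
_dim = pd.Series(np.full(5, 0.1 + 0.0j), index=_fd, name='alpha_dim')
_bright = pd.Series(np.full(5, 2.0 + 0.5j), index=_fd, name='alpha_bright')
_amplitudes = pd.concat([_dim, _bright], axis=1)  # columns: alpha_dim, alpha_bright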
import py
from csvuploader import HeaderCsv
import pandas as pd
from pandas.util.testing import assert_frame_equal
from StringIO import StringIO
def test_load_file(request):
test_dir = py.path.local(request.module.__file__)
with test_dir.dirpath('data', 'simple.csv').open('r') as f:
text = f.read()
        assert text == 'A,B\n1,2'
        f.seek(0)  # rewind so HeaderCsv.load can re-read the stream
        h = HeaderCsv.load(f)
assert h.metadata is None
assert_frame_equal(h.df, pd.DataFrame([[1, 2]], columns=['A', 'B']).set_index('A'))
def test_load_file_with_header(request):
test_dir = py.path.local(request.module.__file__)
with test_dir.dirpath('data', 'simple_with_header.csv').open('r') as f:
h = HeaderCsv.load(f)
assert h.metadata == { 'name': 'Simple with header' }
assert_frame_equal(h.df, pd.DataFrame([[1, 2]], columns=['A', 'B']).set_index('A'))
def test_roundtrip():
stream = StringIO()
h1 = HeaderCsv(None, | pd.DataFrame([[1, 2]], columns=['A', 'B']) | pandas.DataFrame |
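# Illustrative aside, a minimal sketch: pandas.util.testing.assert_frame_equal used above is
# the older import path; current pandas exposes the same helper as pandas.testing.assert_frame_equal.
import pandas as pd
from pandas.testing import assert_frame_equal

_expected = pd.DataFrame([[1, 2]], columns=['A', 'B']).set_index('A')
_actual = pd.DataFrame({'A': [1], 'B': [2]}).set_index('A')
assert_frame_equal(_actual, _expected)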
# Copyright (C) 2020 University of Oxford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import pandas as pd
def date_parser(date):
''' for expanding abbreviated dates'''
try:
if '.' in date:
res = datetime.strptime(date, '%d.%m.%Y')
elif ',' in date:
res = datetime.strptime(date, '%b %d, %Y')
else:
res = datetime.strptime(date, '%d %b %Y')
return res
except ValueError:
if ',' in date:
date = date.split(',')[0]
date = datetime.strptime(date, '%b %d')
else:
date = ' '.join(date.split(' ')[0:-1])
date = datetime.strptime(date, '%d %b')
current_year = datetime.today().year
current_month = datetime.today().month
if date.month in [11, 12] and current_month in [1, 2]:
date = date.replace(year=current_year - 1)
else:
date = date.replace(year=current_year)
return date
def isDate(element):
try:
date_parser(element)
return True
except:
return False
def getTitle(chart_element):
gtag = chart_element.find("g")
chart_title = gtag.text
return chart_title
def isData(chart_element):
# the text for charts of interest begins with a header and then moves straight into dates
titleList = ['Total Cases', 'Total Deaths', 'Total Recoveries', 'Total Cases in Sindh', 'Total Deaths in Sindh', 'Total Recoveries in Sindh']
return getTitle(chart_element) in titleList
def parseChartData(chart_element, debug_file):
# The title
gtag = chart_element.find("g")
title = gtag.text
# The body data
body = gtag.nextSibling
clipper = body.find("g")
circles = clipper.nextSibling
axisMarks = circles.nextSibling
# Starts with x-axis labels. These are dates
axisText = axisMarks.findAll("text")
textTagList = [tag.text for tag in axisText]
with open(debug_file, "a+") as outF:
print(textTagList, file = outF)
dates = [tag for tag in textTagList if isDate(tag)]
# Next come values
valuesTag = axisMarks.nextSibling
valuesText = valuesTag.findAll("text")
values = [tag.text for tag in valuesText]
with open(debug_file, "a+") as outF:
print(values, file = outF)
# Turn the values into numbers
values = [int(value.replace('.', '')) for value in values]
# Replace the x-axis labels with a list of dates corresponding to values
# We can extract the first date and determine what the last date should be
firstDate = date_parser(dates[0])
d = timedelta(days=len(values) - 1)
lastDate = firstDate + d
# check the last date in the x-axis is what it should be
lastDateCheck = dates[-1].split(',')[0]
if not date_parser(lastDateCheck) == lastDate:
raise Exception('date range does not match length of value list')
# now generate the list of dates
current = firstDate
fullDateList = [current]
while current != lastDate:
current += timedelta(days=1)
fullDateList.append(current)
# the chart title is not consistently styled, so take the first two words
splitted = title.split()
title = " ".join(splitted[0:2])
# build a dataframe with the dates and values, using chart title as column name
df = | pd.DataFrame() | pandas.DataFrame |
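# Illustrative aside, a minimal sketch with made-up numbers: building the frame the parser
# returns amounts to pairing the reconstructed date list with the parsed values and using
# the (truncated) chart title as the column name.
import pandas as pd
from datetime import date, timedelta

_dates = [date(2020, 4, 1) + timedelta(days=i) for i in range(3)]
_values = [10, 12, 15]
_df = pd.DataFrame({'Total Cases': _values}, index=pd.to_datetime(_dates))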
import pandas as pd
sample1 = pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample2 = pd.read_table('MUT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample3 = pd.read_table('MUT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample4 = pd.read_table('MUT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
sample5 = pd.read_table('MUT-6_2.annotate.csv', sep='\t', index_col=0)["score"]
sample6 = pd.read_table('WT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample7 = pd.read_table('WT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample8 = pd.read_table('WT-3_2.annotate.csv', sep='\t', index_col=0)["score"]
sample9 = pd.read_table('WT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample10 = pd.read_table('WT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
#
meta1 = pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta2 = pd.read_table('MUT-2_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta3 = pd.read_table('MUT-4_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta4 = pd.read_table('MUT-5_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta5 = pd.read_table('MUT-6_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta6 = pd.read_table('WT-1_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta7= pd.read_table('WT-2_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta8 = pd.read_table('WT-3_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta9 = pd.read_table('WT-4_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta10 = pd.read_table('WT-5_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
concat = pd.concat([sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8,sample9,sample10], axis=1).fillna(0)
concat.columns = ["MUT_V", "MUT1", "MUT2", "MUT4", "MUT3", "WT_V", "WT1", "WT2", "WT3", "WT4"]
meta = pd.concat([meta1,meta2,meta3,meta4,meta5,meta6,meta7,meta8,meta9,meta10])
meta = meta[~meta.index.duplicated(keep="first")]
concat = | pd.concat([concat, meta], axis=1) | pandas.concat |
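# Illustrative aside, a toy sketch: concatenating per-sample score Series along axis=1
# performs an outer join on the junction index, so junctions missing from a sample become
# NaN and the fillna(0) above turns them into zero scores.
import pandas as pd

_s1 = pd.Series({'junc_a': 5, 'junc_b': 2}, name='MUT1')
_s2 = pd.Series({'junc_b': 7, 'junc_c': 1}, name='WT1')
_scores = pd.concat([_s1, _s2], axis=1).fillna(0)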
import math
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from greykite.common.constants import ACTUAL_COL
from greykite.common.constants import COVERAGE_VS_INTENDED_DIFF
from greykite.common.constants import LOWER_BAND_COVERAGE
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import PREDICTION_BAND_COVERAGE
from greykite.common.constants import PREDICTION_BAND_WIDTH
from greykite.common.constants import UPPER_BAND_COVERAGE
from greykite.common.evaluation import ElementwiseEvaluationMetricEnum
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.evaluation import ValidationMetricEnum
from greykite.common.evaluation import add_finite_filter_to_scorer
from greykite.common.evaluation import add_preaggregation_to_scorer
from greykite.common.evaluation import aggregate_array
from greykite.common.evaluation import all_equal_length
from greykite.common.evaluation import calc_pred_coverage
from greykite.common.evaluation import calc_pred_err
from greykite.common.evaluation import correlation
from greykite.common.evaluation import elementwise_absolute_error
from greykite.common.evaluation import elementwise_absolute_percent_error
from greykite.common.evaluation import elementwise_outside_tolerance
from greykite.common.evaluation import elementwise_quantile
from greykite.common.evaluation import elementwise_residual
from greykite.common.evaluation import elementwise_squared_error
from greykite.common.evaluation import elementwise_within_bands
from greykite.common.evaluation import fraction_outside_tolerance
from greykite.common.evaluation import fraction_within_bands
from greykite.common.evaluation import mean_absolute_percent_error
from greykite.common.evaluation import median_absolute_percent_error
from greykite.common.evaluation import prediction_band_width
from greykite.common.evaluation import quantile_loss
from greykite.common.evaluation import quantile_loss_q
from greykite.common.evaluation import r2_null_model_score
from greykite.common.evaluation import root_mean_squared_error
from greykite.common.evaluation import symmetric_mean_absolute_percent_error
from greykite.common.evaluation import valid_elements_for_evaluation
def test_all_equal_length():
"""Tests all_equal_length function"""
assert all_equal_length() is True
assert all_equal_length(
np.array([1, 2, 3])
) is True
assert all_equal_length(
np.array([1, 2, 3]),
[4, 5, 6],
pd.Series([7, 8, 9])
) is True
assert all_equal_length(
np.array([1, 2, 3]),
[4, 6],
pd.Series([7, 8, 9])
) is False
# constants and None are ignored
assert all_equal_length(
np.array([1, 2, 3]),
4,
None,
pd.Series([7, 8, 9])
) is True
def test_valid_elements_for_evaluation():
"""Tests valid_elements_for_evaluation function"""
with pytest.warns(Warning) as record:
y_true = [1.0, np.nan, 2.0, np.Inf]
y_pred = [np.nan, 2.0, 1.0, 2.0]
y_another = [2.0, 1.0, np.nan, np.Inf]
y_true, y_pred, y_another = valid_elements_for_evaluation(
reference_arrays=[y_true],
arrays=[y_pred, y_another],
reference_array_names="y_true",
drop_leading_only=False,
keep_inf=False)
assert "2 value(s) in y_true were NA or infinite and are omitted in error calc." in record[0].message.args[0]
assert_array_equal(y_true, np.array([1.0, 2.0]))
assert_array_equal(y_pred, np.array([np.nan, 1.0]))
assert_array_equal(y_another, np.array([2.0, np.nan]))
# Leading NAs and keep inf
with pytest.warns(Warning) as record:
y_true = [np.nan, 2.0, np.nan, np.Inf]
y_pred = [np.nan, 2.0, 1.0, 2.0]
y_another = [2.0, 1.0, np.nan, np.Inf]
y_true, y_pred, y_another = valid_elements_for_evaluation(
reference_arrays=[y_true],
arrays=[y_pred, y_another],
reference_array_names="y_true",
drop_leading_only=True,
keep_inf=True)
assert "1 value(s) in y_true were NA and are omitted in error calc." in record[0].message.args[0]
assert_array_equal(y_true, np.array([2.0, np.nan, np.Inf]))
assert_array_equal(y_pred, np.array([2.0, 1.0, 2.0]))
assert_array_equal(y_another, np.array([1.0, np.nan, np.Inf]))
# All NAs and drop inf
with pytest.warns(Warning) as record:
y_true = [np.nan, np.nan, 2.0, np.Inf]
y_pred = [np.nan, 2.0, 1.0, 2.0]
y_another = [2.0, 1.0, np.nan, np.Inf]
y_true, y_pred, y_another = valid_elements_for_evaluation(
reference_arrays=[y_true],
arrays=[y_pred, y_another],
reference_array_names="y_true",
drop_leading_only=False,
keep_inf=False)
assert "3 value(s) in y_true were NA or infinite and are omitted in error calc." in record[0].message.args[0]
assert_array_equal(y_true, np.array([2.0]))
assert_array_equal(y_pred, np.array([1.0]))
assert_array_equal(y_another, np.array([np.nan]))
# All NAs and keep inf
with pytest.warns(Warning) as record:
y_true = [np.nan, 2.0, np.nan, np.Inf]
y_pred = [np.nan, 2.0, 1.0, 2.0]
y_another = [2.0, 1.0, np.nan, np.Inf]
y_true, y_pred, y_another = valid_elements_for_evaluation(
reference_arrays=[y_true],
arrays=[y_pred, y_another],
reference_array_names="y_true",
drop_leading_only=False,
keep_inf=True)
assert "2 value(s) in y_true were NA and are omitted in error calc." in record[0].message.args[
0]
assert_array_equal(y_true, np.array([2.0, np.Inf]))
assert_array_equal(y_pred, np.array([2.0, 2.0]))
assert_array_equal(y_another, np.array([1.0, np.Inf]))
with pytest.warns(Warning) as record:
y_true = [1.0, np.nan, 2.0, np.Inf]
y_pred = [np.nan, 2.0, 1.0, 2.0]
y_another = 2.0
y_last = None
y_true, y_pred, y_another, y_last = valid_elements_for_evaluation(
reference_arrays=[y_true],
arrays=[y_pred, y_another, y_last],
reference_array_names="y_true",
drop_leading_only=False,
keep_inf=False)
assert_array_equal(y_true, np.array([1.0, 2.0]))
assert_array_equal(y_pred, np.array([np.nan, 1.0]))
assert y_another == 2.0
assert y_last is None
assert "2 value(s) in y_true were NA or infinite and are omitted in error calc." in record[0].message.args[0]
with pytest.warns(Warning) as record:
y_true = [np.nan, np.Inf]
y_pred = [np.nan, 2.0]
y_another = 2.0
y_last = None
y_true, y_pred, y_another, y_last = valid_elements_for_evaluation(
reference_arrays=[y_true],
arrays=[y_pred, y_another, y_last],
reference_array_names="y_true",
drop_leading_only=False,
keep_inf=False)
assert y_another == 2.0
assert y_last is None
assert "There are 0 non-null elements for evaluation." in record[0].message.args[0]
def test_aggregate_array():
"""Tests aggregate_array function"""
# tests defaults
assert_array_equal(
aggregate_array(pd.Series(np.arange(15))),
np.array([28.0, 77.0])
)
# tests warning
with pytest.warns(Warning) as record:
assert_array_equal(
aggregate_array([1.0, 2.0], agg_periods=3, agg_func=np.sum),
np.array([3.0])
)
assert "Using all for aggregation" in record[0].message.args[0]
# tests aggregation with dropping incomplete bin from the left
assert_array_equal(
aggregate_array([1.0, 2.0, 3.0], agg_periods=3, agg_func=np.sum),
np.array([6.0])
)
assert_array_equal(
aggregate_array([1.0, 2.0, 3.0, 4.0], agg_periods=3, agg_func=np.sum),
np.array([9.0]) # drops 1.0, adds the rest
)
assert_array_equal(
aggregate_array([1.0, 2.0, 3.0, 4.0, 5.0], agg_periods=3, agg_func=np.sum),
np.array([12.0]) # drops 1.0 and 2.0, adds the rest
)
assert_array_equal(
aggregate_array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], agg_periods=3, agg_func=np.sum),
np.array([6.0, 15.0])
)
assert_array_equal(
aggregate_array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], agg_periods=3, agg_func=np.sum),
np.array([9.0, 18.0])
)
# tests np.array input
assert_array_equal(
aggregate_array(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]), agg_periods=3, agg_func=np.sum),
np.array([9.0, 18.0])
)
# tests pd.Series input
assert_array_equal(
aggregate_array( | pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]) | pandas.Series |
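# Illustrative aside, a rough sketch of the default behaviour asserted above
# (np.arange(15) -> [28, 77]), assuming the default aggregation is 7-period sums: the
# incomplete leading bin is dropped and the remaining values are summed in bins of 7.
import numpy as np
import pandas as pd

_values = pd.Series(np.arange(15))
_trimmed = _values[len(_values) % 7:]                      # drop the incomplete left bin
_sums = _trimmed.groupby(np.arange(len(_trimmed)) // 7).sum()
# _sums.values -> array([28, 77])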
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from config_fh import get_db_engine, get_db_session, get_cache_file_path, STR_FORMAT_DATE
from fh_tools.fh_utils import return_risk_analysis, str_2_date
from fh_tools import fh_utils
import matplotlib.pyplot as plt  # PyCharm needs an explicit call to plt.show to display plots
from datetime import date, datetime, timedelta
from sqlalchemy.types import String, Date, FLOAT
import datetime as dt
import logging
logger = logging.getLogger()
STRATEGY_TYPE_CN_EN_DIC = {'ๅบๅธ็ญ็ฅ': 'fixed_income',
'ๅฅๅฉ็ญ็ฅ': 'arbitrage',
'็ฎก็ๆ่ดง็ญ็ฅ': 'cta',
'่ก็ฅจๅคๅคด็ญ็ฅ': 'long_only',
'้ฟๅฐๆณ็ญ็ฅ': 'alpha',
'ๅฎ่ง็ญ็ฅ': 'macro',
'็ปๅๅบ้็ญ็ฅ': 'fof'}
STRATEGY_TYPE_EN_CN_DIC = {en: cn for cn, en in STRATEGY_TYPE_CN_EN_DIC.items()}
def calc_wind_code_list_index(wind_code_list, date_since, file_name=None):
"""
    Compute the index composed of the funds in wind_code_list
    :param wind_code_list:
    :param date_since:
    :param file_name: defaults to None, in which case no file is written
    :return: list of daily returns of the composed index
"""
    # Fetch NAV quote data for the sample sub-funds
wind_code_list_str = ', '.join(["'" + wind_code + "'" for wind_code in wind_code_list])
query_base_str = r'''select fv.wind_code, nav_date_week, fv.nav_acc
from (
select wind_code, adddate(nav_date, 4 - weekday(nav_date)) as nav_date_week, max(nav_date) as nav_date_max
from fund_nav
where wind_code in (%s)
group by wind_code, nav_date_week
) as ffv,
fund_nav fv
where ffv.nav_date_week >= %s
and fv.wind_code = ffv.wind_code
and fv.nav_date = ffv.nav_date_max
group by wind_code, nav_date_week
order by nav_date_week desc'''
query_str = query_base_str % (wind_code_list_str, date_since)
# logger.info(query_str)
engine = get_db_engine()
fund_nav_df = pd.read_sql_query(query_str, engine)
    # Fetch the names of the sample sub-funds
sql_str = """select wind_code, sec_name
from fund_info
where wind_code in (%s)"""
query_str = sql_str % wind_code_list_str
with get_db_session(engine) as session:
table = session.execute(query_str)
fund_code_name_dic = dict(table.fetchall())
# logger.info(df_fund_nav)
df_fund = fund_nav_df.pivot(index='nav_date_week', columns='wind_code', values='nav_acc')
df_fund.rename(columns=fund_code_name_dic, inplace=True)
# df_fund.to_csv('%s-%sใ%dใ %s_%s.csv' % (strategy_name, sample_name, len(wind_code_list), date_from, date_to))
df_fund.interpolate(inplace=True)
df_fund.dropna(inplace=True)
wind_code_list = list(df_fund.columns)
wind_code_count = len(wind_code_list)
if wind_code_count == 0:
logger.info('wind_code_list_str has no data')
# df_fund.to_csv('%s_df_fund.csv' % sample_name)
weight = 1 / wind_code_count
# logger.info(df_fund)
fund_pct_df = df_fund.pct_change().fillna(0)
if file_name is not None:
file_path = get_cache_file_path(file_name)
fund_index_df = (1 + fund_pct_df).cumprod()
fund_index_df.to_csv(file_path)
fund_pct_df *= weight
# logger.info(df_fund_pct)
nav_index_pct_s = None
for wind_code in wind_code_list:
if nav_index_pct_s is None:
nav_index_pct_s = fund_pct_df[wind_code]
else:
nav_index_pct_s += fund_pct_df[wind_code]
# logger.info("df_nav_index_pct_s[%s]:\n" % wind_code, df_nav_index_pct_s)
date_list = list(fund_pct_df.index)
if len(date_list) == 0:
file_path = get_cache_file_path('df_fund_%s_%s.csv' % (file_name, date_since))
        logger.info('The intersection of fund NAV dates is empty; see file %s for details', file_path)
df_fund.to_csv(file_path)
logger.info('between: %s ~ %s', min(date_list), max(date_list))
return nav_index_pct_s
def calc_strategy_index(strategy_name, date_from, date_to, calc_sample_name=None, create_sub_index_csv=False):
"""
    Compute a strategy index.
    Based on the strategy name, pull the strategy's sample fund codes and build the equal-weighted index.
    :param strategy_name: strategy name
    :param date_from: start date
    :param date_to: end date
    :param calc_sample_name: the sample_name to compute; 'main' means the main index, None means all sample indices.
    :return: DataFrame with the main index and the other sample indices
"""
# logger.info('strategy %s between: %s %s', strategy_name, date_from, date_to)
with get_db_session() as session:
# ่ทๅ nav_date ๅ่กจ
stg_table = session.execute(
'SELECT nav_date_week, wind_code_str, sample_name FROM strategy_index_info where strategy_name=:stg_name order by nav_date_week desc',
{'stg_name': strategy_name})
date_last = None
index_pct_s = None
sample_name_list = []
sample_val_list = []
stg_table_data_list = []
for stg_info in stg_table.fetchall():
# date_since = stg_info[0]
# wind_code_str = stg_info[1]
sample_name = stg_info[2]
# logger.info('stg_info %s', stg_info)
if calc_sample_name is not None and sample_name != calc_sample_name:
continue
stg_table_data_list.append(
{'nav_date_week': stg_info[0], 'wind_code_str': stg_info[1], 'sample_name': sample_name})
stg_table_df = pd.DataFrame(stg_table_data_list)
logger.debug('stg_table_df.shape %s', stg_table_df.shape)
stg_table_df_gp = stg_table_df.groupby('sample_name')
stg_table_df_gp_dic = stg_table_df_gp.groups
for sample_name, row_num_list in stg_table_df_gp_dic.items():
index_pct_s = None
date_last = None
for row_num in row_num_list:
wind_code_str = stg_table_df.iloc[row_num]['wind_code_str']
date_since = stg_table_df.iloc[row_num]['nav_date_week']
wind_code_list = wind_code_str.split(sep=',')
if create_sub_index_csv:
file_name = '%s_%s_since_%s.csv' % (strategy_name, sample_name, date_since)
else:
file_name = None
nav_index_pct_s = calc_wind_code_list_index(wind_code_list, date_since, file_name)
logger.debug('%s\n%s', sample_name, nav_index_pct_s)
if date_last is None:
date_available = [d for d in nav_index_pct_s.index if date_from <= d <= date_to and date_since <= d]
else:
date_available = [d for d in nav_index_pct_s.index if
date_from <= d <= date_to and date_since <= d < date_last]
date_last = date_since
if index_pct_s is None:
                index_pct_s = nav_index_pct_s.loc[date_available]
            else:
                index_pct_s = pd.concat([index_pct_s, nav_index_pct_s.loc[date_available]])
# logger.info(sample_name, '\n', index_pct_s)
sample_val_s = (1 + index_pct_s).cumprod()
sample_name_list.append(sample_name)
sample_val_list.append(sample_val_s)
# sample_val_s.to_csv('%s %s_%s.csv' % (strategy_name, date_from, date_to))
if len(sample_val_list) == 0:
index_df = None
else:
index_df = pd.DataFrame(sample_val_list, index=sample_name_list).T
index_df.rename(columns={'main': strategy_name}, inplace=True)
index_df.interpolate(inplace=True)
return index_df
def update_strategy_index(date_from_str, date_to_str):
"""
    Update the index NAV values for every strategy_name in strategy_index_info into the strategy_index_val table
    :param date_from_str: start date %Y-%m-%d
    :param date_to_str: end date %Y-%m-%d
:return:
"""
engine = get_db_engine()
with get_db_session(engine) as session:
stg_table = session.execute('select strategy_name from strategy_index_info group by strategy_name')
strategy_name_list = [stg_info[0] for stg_info in stg_table.fetchall()]
strategy_name_count = len(strategy_name_list)
if strategy_name_count == 0:
logger.info('strategy_index_info table is empty')
return
# strategy_name_list = ['long_only', 'cta', 'arbitrage', 'alpha', 'macro']
date_from = datetime.strptime(date_from_str, '%Y-%m-%d').date()
date_to = datetime.strptime(date_to_str, '%Y-%m-%d').date()
index_df_list = []
for strategy_name in strategy_name_list:
# strategy_name = 'long_only'
# index_df = calc_strategy_index(strategy_name, date_from, date_to, calc_sample_name='main')
stg_index_s = get_strategy_index_by_name(strategy_name, date_from, date_to, statistic=False)
if stg_index_s is not None:
            logger.info('Generated strategy index %s [%s ~ %s]', strategy_name, stg_index_s.index[0], stg_index_s.index[-1])
# index_df.to_csv('%s_sample_%s_%s.csv' % (strategy_name, date_from, date_to))
index_df = pd.DataFrame({'value': stg_index_s})
index_df.index.rename('nav_date', inplace=True)
index_df.reset_index(inplace=True)
# index_df.rename(columns={'nav_date_week': 'nav_date', strategy_name: 'value'}, inplace=True)
index_df['index_name'] = strategy_name
index_df_list.append(index_df)
else:
            logger.info('No data to show for %s', strategy_name)
index_df_all = pd.concat(index_df_list)
index_df_all.set_index(['index_name', 'nav_date'], inplace=True)
    # Reset the table contents
table_name = 'strategy_index_val'
with get_db_session(engine) as session:
# session.execute("delete from %s where nav_date between '%s' and '%s'" % (table_name, date_from_str, date_to_str))
session.execute("truncate table %s" % table_name)
index_df_all.to_sql(table_name, engine, if_exists='append',
dtype={
'index_name': String(20),
'nav_date': Date,
'value': FLOAT,
}
)
def stat_fund_by_stg(strategy_type, date_from, date_to):
"""
    Summarize fund performance for the given strategy over the given date range,
    including sample count and the share of funds above 1%, below -1%, etc.
:param strategy_type:
:param date_from:
:param date_to:
:return:
"""
sql_str = """select fv.wind_code, nav_date_week, fv.nav_acc
from (
select wind_code, adddate(nav_date, 4 - weekday(nav_date)) as nav_date_week, max(nav_date) as nav_date_max
from fund_nav
where wind_code in (select wind_code from fund_info where strategy_type = '%s')
group by wind_code, nav_date_week
having nav_date_week between '%s' and '%s'
) as ffv,
fund_nav fv
where ffv.nav_date_week between '%s' and '%s'
and fv.wind_code = ffv.wind_code
and fv.nav_date = ffv.nav_date_max
group by wind_code, nav_date_week
order by nav_date_week desc"""
query_str = sql_str % (strategy_type, date_from, date_to, date_from, date_to)
engine = get_db_engine()
fund_nav_df = | pd.read_sql_query(query_str, engine) | pandas.read_sql_query |
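# Illustrative aside, a hypothetical minimal example (the connection string and table below
# are placeholders, not the project's MySQL setup): pd.read_sql_query runs a query through a
# SQLAlchemy engine and returns the result set as a DataFrame.
import pandas as pd
from sqlalchemy import create_engine

_engine = create_engine('sqlite:///:memory:')
pd.DataFrame({'wind_code': ['F0001'], 'nav_acc': [1.05]}).to_sql('fund_nav', _engine, index=False)
_fund_nav_df = pd.read_sql_query('select wind_code, nav_acc from fund_nav', _engine)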
'''
ML-Based Trading Strategy
'''
import cbpro
import zmq
import sys
import json
import time
import os
import pickle
import pandas as pd
import numpy as np
import datetime as dt
# the following libraries are to update the persisted ML model
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# overload the on_message behavior of cbpro.WebsocketClient
class MyWebsocketClient(cbpro.WebsocketClient):
def on_open(self):
self.url = "wss://ws-feed.pro.coinbase.com/"
self.products = symbol
self.channels = ['ticker']
self.should_print = False
def on_message(self, msg):
self.data = msg
def on_close(self):
print("-- Goodbye! --")
def logger_monitor(message, time=True, sep=True):
# logger and monitor function
with open(log_file, 'a') as f:
t = str(dt.datetime.now())
msg = ''
if time:
msg += ',' + t + ','
if sep:
msg += 3 * '='
msg += ',' + message + ','
# sends the message via the socket
socket.send_string(msg)
# writes the message to the log file
f.write(msg)
return
def report_positions(pos):
'''Logs and sends position data'''
out = ''
out += ',Going {},'.format(pos) + ','
time.sleep(0.033) # waits for the order to be executed
# get orders (will possibly make multiple HTTP requests)
#get_orders_gen = auth_client.get_orders()
get_fills = list(fills_gen)
out += ',' + str(get_fills) + ','
logger_monitor(out)
return
# callback function - algo trading minimal working example
# https://en.wikipedia.org/wiki/Minimal_working_example
def trading_mwe(symbol, amount, position, bar, min_bars, twentyfour, df_accounts, df_fills):
# Welcome message
print('')
print('*'*50)
print('*** Welcome to Tenzin II Crypto Trader ***')
print('*'*50)
print('')
print('Trading: ', symbol)
print('Amount per trade: ', amount)
print('')
print('Last 24 hrs:')
print('')
print('Open: .........', twentyfour['open'])
print('Last: .........', twentyfour['last'])
print('High: .........', twentyfour['high'])
print('Low: .......', twentyfour['low'])
print('Volume: ......', twentyfour['volume'])
print('30 day Volume: ', twentyfour['volume_30day'])
print('')
print('Recent orders: ')
print(df_fills.loc[-3:,['product_id', 'fee', 'side', 'settled', 'usd_volume']])
print('')
print('Account Positions: ')
print(df_accounts[['currency', 'balance']])
print('')
# global variables
global wsClient, df, dataframe, algorithm, log_file
# intialize variables
trading = 'n' # default == not trading
# ask to start trading
trading = input('Start trading? [Y]/[n]:')
while trading == 'Y':
while wsClient.data:
start = time.process_time() # reference for start of trading
end = start + 10.0 # when to end trading in minutes
tick = wsClient.data
dataframe = dataframe.append(tick, ignore_index=True)
dataframe.index = | pd.to_datetime(dataframe['time'], infer_datetime_format=True) | pandas.to_datetime |
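# Illustrative aside, a minimal sketch with fake ticks: indexing the tick DataFrame by the
# parsed 'time' column, as done above, is what later allows resampling ticks into bars.
import pandas as pd

_ticks = pd.DataFrame({'time': ['2021-01-01T00:00:00.5Z', '2021-01-01T00:01:01.1Z'],
                       'price': [29000.1, 29001.4]})
_ticks.index = pd.to_datetime(_ticks['time'])
_bars = _ticks['price'].resample('1min').ohlc()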
"""
An exhaustive list of pandas methods exercising NDFrame.__finalize__.
"""
import operator
import re
import numpy as np
import pytest
import pandas as pd
# TODO:
# * Binary methods (mul, div, etc.)
# * Binary outputs (align, etc.)
# * top-level methods (concat, merge, get_dummies, etc.)
# * window
# * cumulative reductions
not_implemented_mark = pytest.mark.xfail(reason="not implemented")
mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"])
frame_data = ({"A": [1]},)
frame_mi_data = ({"A": [1, 2, 3, 4]}, mi)
# Tuple of
# - Callable: Constructor (Series, DataFrame)
# - Tuple: Constructor args
# - Callable: pass the constructed value with attrs set to this.
_all_methods = [
(
pd.Series,
(np.array([0], dtype="float64")),
operator.methodcaller("view", "int64"),
),
(pd.Series, ([0],), operator.methodcaller("take", [])),
(pd.Series, ([0],), operator.methodcaller("__getitem__", [True])),
(pd.Series, ([0],), operator.methodcaller("repeat", 2)),
pytest.param(
(pd.Series, ([0],), operator.methodcaller("reset_index")),
marks=pytest.mark.xfail,
),
(pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)),
pytest.param(
(pd.Series, ([0],), operator.methodcaller("to_frame")), marks=pytest.mark.xfail
),
(pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")),
(pd.Series, ([0, 0],), operator.methodcaller("duplicated")),
(pd.Series, ([0, 0],), operator.methodcaller("round")),
(pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)),
(pd.Series, ([0, 0],), operator.methodcaller("rename", "name")),
(pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])),
(pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])),
(pd.Series, ([0, 0],), operator.methodcaller("drop", [0])),
(pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)),
(pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})),
(pd.Series, ([0, 0],), operator.methodcaller("shift")),
(pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])),
(pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)),
(pd.Series, ([0, 0],), operator.methodcaller("isna")),
(pd.Series, ([0, 0],), operator.methodcaller("isnull")),
(pd.Series, ([0, 0],), operator.methodcaller("notna")),
(pd.Series, ([0, 0],), operator.methodcaller("notnull")),
(pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))),
# TODO: mul, div, etc.
(
pd.Series,
([0], pd.period_range("2000", periods=1)),
operator.methodcaller("to_timestamp"),
),
(
pd.Series,
([0], pd.date_range("2000", periods=1)),
operator.methodcaller("to_period"),
),
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("dot", pd.DataFrame(index=["A"])),
),
marks=pytest.mark.xfail(reason="Implement binary finalize"),
),
(pd.DataFrame, frame_data, operator.methodcaller("transpose")),
(pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")),
(pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))),
(pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")),
(pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1", engine="python")),
(pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")),
(pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)),
(pd.DataFrame, frame_data, operator.methodcaller("set_axis", ["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])),
(pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])),
(pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})),
(pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)),
(pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")),
(pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")),
(pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")),
(pd.DataFrame, frame_data, operator.methodcaller("reset_index")),
(pd.DataFrame, frame_data, operator.methodcaller("isna")),
(pd.DataFrame, frame_data, operator.methodcaller("isnull")),
(pd.DataFrame, frame_data, operator.methodcaller("notna")),
(pd.DataFrame, frame_data, operator.methodcaller("notnull")),
(pd.DataFrame, frame_data, operator.methodcaller("dropna")),
(pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")),
(pd.DataFrame, frame_data, operator.methodcaller("duplicated")),
(pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")),
(pd.DataFrame, frame_data, operator.methodcaller("sort_index")),
(pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")),
(pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")),
(pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")),
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("add", pd.DataFrame(*frame_data)),
),
marks=not_implemented_mark,
),
# TODO: div, mul, etc.
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("combine", | pd.DataFrame(*frame_data) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: Sina Finance - stock options
https://stock.finance.sina.com.cn/option/quotes.html
Options - CFFEX - CSI 300 index
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
Options - SSE - 50 ETF
Options - SSE - 300 ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Options - CFFEX - CSI 300 index
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
    Sina Finance - CFFEX - CSI 300 index option contracts; the first contract returned is the main contract
    Currently Sina Finance - CFFEX only carries data for the CSI 300 index product
    :return: CFFEX - CSI 300 index option contracts
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> pd.DataFrame:
"""
    CFFEX - CSI 300 index - real-time quotes for the specified contract
    https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
    :param symbol: contract code; use the option_cffex_hs300_list_sina function to look it up
    :type symbol: str
    :return: CFFEX - CSI 300 index - real-time call and put quotes for the specified contract
:rtype: pd.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.getOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_call_df = pd.DataFrame(
data_json["result"]["data"]["up"],
columns=[
"็ๆถจๅ็บฆ-ไนฐ้",
"็ๆถจๅ็บฆ-ไนฐไปท",
"็ๆถจๅ็บฆ-ๆๆฐไปท",
"็ๆถจๅ็บฆ-ๅไปท",
"็ๆถจๅ็บฆ-ๅ้",
"็ๆถจๅ็บฆ-ๆไป้",
"็ๆถจๅ็บฆ-ๆถจ่ท",
"่กๆไปท",
"็ๆถจๅ็บฆ-ๆ ่ฏ",
],
)
option_put_df = pd.DataFrame(
data_json["result"]["data"]["down"],
columns=[
"็่ทๅ็บฆ-ไนฐ้",
"็่ทๅ็บฆ-ไนฐไปท",
"็่ทๅ็บฆ-ๆๆฐไปท",
"็่ทๅ็บฆ-ๅไปท",
"็่ทๅ็บฆ-ๅ้",
"็่ทๅ็บฆ-ๆไป้",
"็่ทๅ็บฆ-ๆถจ่ท",
"็่ทๅ็บฆ-ๆ ่ฏ",
],
)
data_df = pd.concat([option_call_df, option_put_df], axis=1)
data_df['็ๆถจๅ็บฆ-ไนฐ้'] = pd.to_numeric(data_df['็ๆถจๅ็บฆ-ไนฐ้'])
data_df['็ๆถจๅ็บฆ-ไนฐไปท'] = pd.to_numeric(data_df['็ๆถจๅ็บฆ-ไนฐไปท'])
data_df['็ๆถจๅ็บฆ-ๆๆฐไปท'] = pd.to_numeric(data_df['็ๆถจๅ็บฆ-ๆๆฐไปท'])
data_df['็ๆถจๅ็บฆ-ๅไปท'] = pd.to_numeric(data_df['็ๆถจๅ็บฆ-ๅไปท'])
data_df['็ๆถจๅ็บฆ-ๅ้'] = pd.to_numeric(data_df['็ๆถจๅ็บฆ-ๅ้'])
data_df['็ๆถจๅ็บฆ-ๆไป้'] = pd.to_numeric(data_df['็ๆถจๅ็บฆ-ๆไป้'])
data_df['็ๆถจๅ็บฆ-ๆถจ่ท'] = pd.to_numeric(data_df['็ๆถจๅ็บฆ-ๆถจ่ท'])
data_df['่กๆไปท'] = pd.to_ | numeric(data_df['่กๆไปท']) | pandas.to_numeric |
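# Illustrative aside, a toy sketch: the block above coerces the scraped string columns to
# numbers one at a time; pd.to_numeric (optionally with errors='coerce') is the workhorse.
import pandas as pd

_quotes = pd.DataFrame({'bid': ['1.25', '2.50'], 'volume': ['10', '-']})
_quotes['bid'] = pd.to_numeric(_quotes['bid'])
_quotes['volume'] = pd.to_numeric(_quotes['volume'], errors='coerce')  # '-' becomes NaN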
import feedparser
import pprint
import requests
import pandas as pd
import numpy as np
def loadFiles( codes ):
"""Devuelve una lista de dataframes para solo codigo"""
#codes = ['Est_Mercat_Immobiliari_Lloguer_Mitja_Mensual']
parameters = {'rows': '1000'}
url = 'http://opendata-ajuntament.barcelona.cat/data/api/3/action/package_search'
response = requests.get(url,params=parameters)
catalogo = pd.DataFrame(response.json()['result']['results'])
i = 0
for index, row in catalogo.iterrows():
if i== 0:
fuente_datos = pd.DataFrame(row['resources'])
fuente_datos['code'] = row['code']
i = 1
aux = | pd.DataFrame(row['resources']) | pandas.DataFrame |
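# Illustrative aside, a hypothetical sketch (field names invented): each catalogue row carries
# a nested list of resources, and turning that list into its own DataFrame flattens it;
# pd.json_normalize would be an alternative.
import pandas as pd

_row = {'code': 'ds1', 'resources': [{'url': 'a.csv', 'format': 'CSV'},
                                     {'url': 'b.json', 'format': 'JSON'}]}
_resources = pd.DataFrame(_row['resources'])
_resources['code'] = _row['code']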
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np # type: ignore
import pandas as pd # type: ignore
from elasticsearch import Elasticsearch
# Default number of rows displayed (different to pandas where ALL could be displayed)
DEFAULT_NUM_ROWS_DISPLAYED = 60
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_CSV_BATCH_OUTPUT_SIZE = 10000
DEFAULT_PROGRESS_REPORTING_NUM_ROWS = 10000
DEFAULT_ES_MAX_RESULT_WINDOW = 10000 # index.max_result_window
DEFAULT_PAGINATION_SIZE = 5000 # for composite aggregations
PANDAS_VERSION: Tuple[int, ...] = tuple(
int(part) for part in pd.__version__.split(".") if part.isdigit()
)[:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
EMPTY_SERIES_DTYPE = pd.Series().dtype
def build_pd_series(
data: Dict[str, Any], dtype: Optional[np.dtype] = None, **kwargs: Any
) -> pd.Series:
"""Builds a pd.Series while squelching the warning
for unspecified dtype on empty series
"""
dtype = dtype or (EMPTY_SERIES_DTYPE if not data else dtype)
if dtype is not None:
kwargs["dtype"] = dtype
return pd.Series(data, **kwargs)
def docstring_parameter(*sub: Any) -> Callable[[Any], Any]:
def dec(obj: Any) -> Any:
obj.__doc__ = obj.__doc__.format(*sub)
return obj
return dec
class SortOrder(Enum):
ASC = 0
DESC = 1
@staticmethod
def reverse(order: "SortOrder") -> "SortOrder":
if order == SortOrder.ASC:
return SortOrder.DESC
return SortOrder.ASC
@staticmethod
def to_string(order: "SortOrder") -> str:
if order == SortOrder.ASC:
return "asc"
return "desc"
@staticmethod
def from_string(order: str) -> "SortOrder":
if order == "asc":
return SortOrder.ASC
return SortOrder.DESC
def elasticsearch_date_to_pandas_date(
value: Union[int, str], date_format: Optional[str]
) -> pd.Timestamp:
"""
Given a specific Elasticsearch format for a date datatype, returns the
'partial' `to_datetime` function to parse a given value in that format
**Date Formats: https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#built-in-date-formats
Parameters
----------
value: Union[int, str]
The date value.
date_format: str
The Elasticsearch date format (ex. 'epoch_millis', 'epoch_second', etc.)
Returns
-------
datetime: pd.Timestamp
From https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html
Date formats can be customised, but if no format is specified then it uses the default:
"strict_date_optional_time||epoch_millis"
Therefore if no format is specified we assume either strict_date_optional_time
or epoch_millis.
"""
if date_format is None or isinstance(value, (int, float)):
try:
return pd.to_datetime(
value, unit="s" if date_format == "epoch_second" else "ms"
)
except ValueError:
return pd.to_datetime(value)
elif date_format == "epoch_millis":
return pd.to_datetime(value, unit="ms")
elif date_format == "epoch_second":
return pd.to_datetime(value, unit="s")
elif date_format == "strict_date_optional_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "basic_date":
return pd.to_datetime(value, format="%Y%m%d")
elif date_format == "basic_date_time":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S.%f", exact=False)
elif date_format == "basic_date_time_no_millis":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S%z")
elif date_format == "basic_ordinal_date":
return pd.to_datetime(value, format="%Y%j")
elif date_format == "basic_ordinal_date_time":
return pd.to_datetime(value, format="%Y%jT%H%M%S.%f%z", exact=False)
elif date_format == "basic_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y%jT%H%M%S%z")
elif date_format == "basic_time":
return pd.to_datetime(value, format="%H%M%S.%f%z", exact=False)
elif date_format == "basic_time_no_millis":
return pd.to_datetime(value, format="%H%M%S%z")
elif date_format == "basic_t_time":
return pd.to_datetime(value, format="T%H%M%S.%f%z", exact=False)
elif date_format == "basic_t_time_no_millis":
return pd.to_datetime(value, format="T%H%M%S%z")
elif date_format == "basic_week_date":
return pd.to_datetime(value, format="%GW%V%u")
elif date_format == "basic_week_date_time":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S.%f%z", exact=False)
elif date_format == "basic_week_date_time_no_millis":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S%z")
elif date_format == "strict_date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "strict_date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "strict_date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "strict_date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "strict_date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "strict_hour":
return pd.to_datetime(value, format="%H")
elif date_format == "hour":
return pd.to_datetime(value, format="%H")
elif date_format == "strict_hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "strict_hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "strict_hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "strict_ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "strict_time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "strict_t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "strict_week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "strict_week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_week_date_time_no_millis":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
elif date_format == "week_date_time_no_millis":
        return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
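# Illustrative examples (the input values below are hypothetical; the formats are the
# ones handled above):
#
#     elasticsearch_date_to_pandas_date(1451649600000, "epoch_millis")
#         -> Timestamp("2016-01-01 12:00:00")
#     elasticsearch_date_to_pandas_date("20160101", "basic_date")
#         -> Timestamp("2016-01-01 00:00:00")
#     elasticsearch_date_to_pandas_date("2016-01-01T12:00:00", None)
#         -> epoch parse fails with ValueError, falls back to plain pd.to_datetime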
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
import os
import pickle as pkl
import numpy as np
from numba import njit
from numba.experimental import jitclass
from numba import types, _helperlib
from .types import float32, boolean, uint32, string, void, get_array_2d_type
from .checks import check_X_y, check_array
from .sample import (
SamplesCollection,
add_samples,
samples_collection_to_dict,
dict_to_samples_collection,
)
from .tree import (
TreeClassifier,
TreeRegressor,
tree_classifier_to_dict,
tree_regressor_to_dict,
)
from .tree_methods import (
tree_classifier_partial_fit,
tree_regressor_partial_fit,
tree_classifier_predict,
tree_regressor_predict,
tree_regressor_weighted_depth,
)
from .node_collection import dict_to_nodes_classifier, dict_to_nodes_regressor
from .utils import get_type
spec_amf_learner = [
("n_features", uint32),
("n_estimators", uint32),
("step", float32),
("loss", string),
("use_aggregation", boolean),
("split_pure", boolean),
("n_jobs", uint32),
("n_samples_increment", uint32),
("verbose", boolean),
("samples", get_type(SamplesCollection)),
("iteration", uint32),
]
spec_amf_classifier = spec_amf_learner + [
("n_classes", uint32),
("dirichlet", float32),
("trees", types.List(get_type(TreeClassifier), reflected=True)),
]
# TODO: we can force pre-compilation when creating the nopython forest
@jitclass(spec_amf_classifier)
class AMFClassifierNoPython(object):
def __init__(
self,
n_classes,
n_features,
n_estimators,
step,
loss,
use_aggregation,
dirichlet,
split_pure,
n_jobs,
n_samples_increment,
verbose,
samples,
trees_iteration,
trees_n_nodes,
trees_n_nodes_capacity,
):
self.n_classes = n_classes
self.n_features = n_features
self.n_estimators = n_estimators
self.step = step
self.loss = loss
self.use_aggregation = use_aggregation
self.dirichlet = dirichlet
self.split_pure = split_pure
self.n_jobs = n_jobs
self.n_samples_increment = n_samples_increment
self.verbose = verbose
self.samples = samples
if trees_iteration.size == 0:
self.iteration = 0
# TODO: reflected lists will be replaced by typed list soon...
iteration = 0
n_nodes = 0
n_nodes_capacity = 0
trees = [
TreeClassifier(
self.n_classes,
self.n_features,
self.step,
self.loss,
self.use_aggregation,
self.dirichlet,
self.split_pure,
self.samples,
iteration,
n_nodes,
n_nodes_capacity,
)
for _ in range(n_estimators)
]
self.trees = trees
else:
trees = [
TreeClassifier(
self.n_classes,
self.n_features,
self.step,
self.loss,
self.use_aggregation,
self.dirichlet,
self.split_pure,
self.samples,
trees_iteration[n_estimator],
trees_n_nodes[n_estimator],
trees_n_nodes_capacity[n_estimator],
)
for n_estimator in range(n_estimators)
]
self.trees = trees
@njit(void(get_type(AMFClassifierNoPython), get_array_2d_type(float32), float32[::1],))
def forest_classifier_partial_fit(forest, X, y):
n_samples_batch, n_features = X.shape
# First, we save the new batch of data
n_samples_before = forest.samples.n_samples
# Add the samples in the forest
add_samples(forest.samples, X, y)
for i in range(n_samples_before, n_samples_before + n_samples_batch):
# Then we fit all the trees using all new samples
for tree in forest.trees:
tree_classifier_partial_fit(tree, i)
forest.iteration += 1
# TODO: code predict
# def predict(self, X, scores):
# scores.fill(0.0)
# n_samples_batch, _ = X.shape
# if self.iteration > 0:
# scores_tree = np.empty(self.n_classes, float32)
# for i in range(n_samples_batch):
# # print('i:', i)
# scores_i = scores[i]
# x_i = X[i]
# # print('x_i:', x_i)
# # The prediction is simply the average of the predictions
# for tree in self.trees:
# tree_predict(tree, x_i, scores_tree, self.use_aggregation)
# # print('scores_tree:', scores_tree)
# scores_i += scores_tree
# scores_i /= self.n_estimators
# # print('scores_i:', scores_i)
# else:
# raise RuntimeError("You must call ``partial_fit`` before ``predict``.")
@njit(
void(
get_type(AMFClassifierNoPython),
get_array_2d_type(float32),
get_array_2d_type(float32),
)
)
def forest_classifier_predict_proba(forest, X, scores):
# TODO: use predict_proba_tree from below ? Or put it in the tree ?
scores.fill(0.0)
n_samples_batch, _ = X.shape
scores_tree = np.empty(forest.n_classes, float32)
for i in range(n_samples_batch):
scores_i = scores[i]
x_i = X[i]
# The prediction is simply the average of the predictions
for tree in forest.trees:
tree_classifier_predict(tree, x_i, scores_tree, forest.use_aggregation)
scores_i += scores_tree
scores_i /= forest.n_estimators
@njit(
get_array_2d_type(float32)(
get_type(AMFClassifierNoPython), uint32, get_array_2d_type(float32)
)
)
def forest_classifier_predict_proba_tree(forest, idx_tree, X):
n_samples_batch, _ = X.shape
scores = np.empty((n_samples_batch, forest.n_classes), dtype=float32)
tree = forest.trees[idx_tree]
for i in range(n_samples_batch):
scores_i = scores[i]
x_i = X[i]
tree_classifier_predict(tree, x_i, scores_i, forest.use_aggregation)
return scores
def amf_classifier_nopython_to_dict(forest):
d = {}
for key, _ in spec_amf_classifier:
if key == "samples":
d["samples"] = samples_collection_to_dict(forest.samples)
elif key == "trees":
d["trees"] = [tree_classifier_to_dict(tree) for tree in forest.trees]
else:
d[key] = getattr(forest, key)
return d
def dict_to_amf_classifier_nopython(d):
n_classes = d["n_classes"]
n_features = d["n_features"]
n_estimators = d["n_estimators"]
step = d["step"]
loss = d["loss"]
use_aggregation = d["use_aggregation"]
dirichlet = d["dirichlet"]
split_pure = d["split_pure"]
n_jobs = d["n_jobs"]
n_samples_increment = d["n_samples_increment"]
verbose = d["verbose"]
# Create the samples jitclass from a dict
samples = dict_to_samples_collection(d["samples"])
trees_dict = d["trees"]
trees_iteration = np.array(
[tree_dict["iteration"] for tree_dict in trees_dict], dtype=np.uint32
)
trees_n_nodes = np.array(
[tree_dict["nodes"]["n_nodes"] for tree_dict in trees_dict], dtype=np.uint32
)
trees_n_nodes_capacity = np.array(
[tree_dict["nodes"]["n_nodes_capacity"] for tree_dict in trees_dict],
dtype=np.uint32,
)
no_python = AMFClassifierNoPython(
n_classes,
n_features,
n_estimators,
step,
loss,
use_aggregation,
dirichlet,
split_pure,
n_jobs,
n_samples_increment,
verbose,
samples,
trees_iteration,
trees_n_nodes,
trees_n_nodes_capacity,
)
no_python.iteration = d["iteration"]
no_python.samples = samples
trees = no_python.trees
# no_python is initialized, it remains to initialize the nodes
for n_estimator in range(n_estimators):
tree_dict = trees_dict[n_estimator]
nodes_dict = tree_dict["nodes"]
tree = trees[n_estimator]
nodes = tree.nodes
# Copy node information
dict_to_nodes_classifier(nodes, nodes_dict)
# Copy intensities
tree.intensities[:] = tree_dict["intensities"]
return no_python
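# Round-trip sketch (illustrative only): the two helpers above are symmetric, so a
# fitted nopython classifier forest can be moved through a plain dict, e.g. for pickling.
# `forest` stands for an existing AMFClassifierNoPython instance.
#
#     d = amf_classifier_nopython_to_dict(forest)
#     with open("forest.pkl", "wb") as f:
#         pkl.dump(d, f)
#     with open("forest.pkl", "rb") as f:
#         restored = dict_to_amf_classifier_nopython(pkl.load(f))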
spec_amf_regressor = spec_amf_learner + [
("trees", types.List(get_type(TreeRegressor), reflected=True)),
]
# TODO: we can force pre-compilation when creating the nopython forest
@jitclass(spec_amf_regressor)
class AMFRegressorNoPython(object):
def __init__(
self,
n_features,
n_estimators,
step,
loss,
use_aggregation,
split_pure,
n_jobs,
n_samples_increment,
verbose,
samples,
trees_iteration,
trees_n_nodes,
trees_n_nodes_capacity,
):
self.n_features = n_features
self.n_estimators = n_estimators
self.step = step
self.loss = loss
self.use_aggregation = use_aggregation
self.split_pure = split_pure
self.n_jobs = n_jobs
self.n_samples_increment = n_samples_increment
self.verbose = verbose
self.samples = samples
if trees_iteration.size == 0:
self.iteration = 0
iteration = 0
n_nodes = 0
n_nodes_capacity = 0
trees = [
TreeRegressor(
self.n_features,
self.step,
self.loss,
self.use_aggregation,
self.split_pure,
self.samples,
iteration,
n_nodes,
n_nodes_capacity,
)
for _ in range(n_estimators)
]
self.trees = trees
else:
trees = [
TreeRegressor(
self.n_features,
self.step,
self.loss,
self.use_aggregation,
self.split_pure,
self.samples,
trees_iteration[n_estimator],
trees_n_nodes[n_estimator],
trees_n_nodes_capacity[n_estimator],
)
for n_estimator in range(n_estimators)
]
self.trees = trees
@njit(void(get_type(AMFRegressorNoPython), get_array_2d_type(float32), float32[::1]))
def forest_regressor_partial_fit(forest, X, y):
n_samples_batch, n_features = X.shape
# First, we save the new batch of data
n_samples_before = forest.samples.n_samples
# Add the samples in the forest
add_samples(forest.samples, X, y)
for i in range(n_samples_before, n_samples_before + n_samples_batch):
# Then we fit all the trees using all new samples
for tree in forest.trees:
tree_regressor_partial_fit(tree, i)
forest.iteration += 1
# TODO: code predict
# def predict(self, X, scores):
# scores.fill(0.0)
# n_samples_batch, _ = X.shape
# if self.iteration > 0:
# scores_tree = np.empty(self.n_classes, float32)
# for i in range(n_samples_batch):
# # print('i:', i)
# scores_i = scores[i]
# x_i = X[i]
# # print('x_i:', x_i)
# # The prediction is simply the average of the predictions
# for tree in self.trees:
# tree_predict(tree, x_i, scores_tree, self.use_aggregation)
# # print('scores_tree:', scores_tree)
# scores_i += scores_tree
# scores_i /= self.n_estimators
# # print('scores_i:', scores_i)
# else:
# raise RuntimeError("You must call ``partial_fit`` before ``predict``.")
@njit(void(get_type(AMFRegressorNoPython), get_array_2d_type(float32), float32[::1]))
def forest_regressor_predict(forest, X, predictions):
# TODO: Useless ?
predictions.fill(0.0)
n_samples_batch, _ = X.shape
for i in range(n_samples_batch):
x_i = X[i]
prediction = 0
# The prediction is simply the average of the predictions
for tree in forest.trees:
prediction += tree_regressor_predict(tree, x_i, forest.use_aggregation)
predictions[i] = prediction / forest.n_estimators
def amf_regressor_nopython_to_dict(forest):
d = {}
for key, _ in spec_amf_regressor:
if key == "samples":
d["samples"] = samples_collection_to_dict(forest.samples)
elif key == "trees":
d["trees"] = [tree_regressor_to_dict(tree) for tree in forest.trees]
else:
d[key] = getattr(forest, key)
return d
def dict_to_amf_regressor_nopython(d):
n_features = d["n_features"]
n_estimators = d["n_estimators"]
step = d["step"]
loss = d["loss"]
use_aggregation = d["use_aggregation"]
split_pure = d["split_pure"]
n_jobs = d["n_jobs"]
n_samples_increment = d["n_samples_increment"]
verbose = d["verbose"]
# Create the samples jitclass from a dict
samples = dict_to_samples_collection(d["samples"])
trees_dict = d["trees"]
trees_iteration = np.array(
[tree_dict["iteration"] for tree_dict in trees_dict], dtype=np.uint32
)
trees_n_nodes = np.array(
[tree_dict["nodes"]["n_nodes"] for tree_dict in trees_dict], dtype=np.uint32
)
trees_n_nodes_capacity = np.array(
[tree_dict["nodes"]["n_nodes_capacity"] for tree_dict in trees_dict],
dtype=np.uint32,
)
no_python = AMFRegressorNoPython(
n_features,
n_estimators,
step,
loss,
use_aggregation,
split_pure,
n_jobs,
n_samples_increment,
verbose,
samples,
trees_iteration,
trees_n_nodes,
trees_n_nodes_capacity,
)
no_python.iteration = d["iteration"]
no_python.samples = samples
trees = no_python.trees
# no_python is initialized, it remains to initialize the nodes
for n_estimator in range(n_estimators):
tree_dict = trees_dict[n_estimator]
nodes_dict = tree_dict["nodes"]
tree = trees[n_estimator]
nodes = tree.nodes
# Copy node information
dict_to_nodes_regressor(nodes, nodes_dict)
# Copy intensities
tree.intensities[:] = tree_dict["intensities"]
return no_python
@njit(
void(
get_type(AMFRegressorNoPython),
get_array_2d_type(float32),
get_array_2d_type(float32),
)
)
def forest_regressor_weighted_depths(forest, X, weighted_depths):
n_samples_batch, _ = X.shape
for i in range(n_samples_batch):
x_i = X[i]
n_tree = 0
for tree in forest.trees:
weighted_depth = tree_regressor_weighted_depth(
tree, x_i, forest.use_aggregation
)
weighted_depths[i, n_tree] = weighted_depth
n_tree += 1
@njit(
get_array_2d_type(float32)(
get_type(AMFClassifierNoPython), uint32, get_array_2d_type(float32)
)
)
def forest_classifier_predict_proba_tree(forest, idx_tree, X):
n_samples_batch, _ = X.shape
scores = np.empty((n_samples_batch, forest.n_classes), dtype=float32)
tree = forest.trees[idx_tree]
for i in range(n_samples_batch):
scores_i = scores[i]
x_i = X[i]
tree_classifier_predict(tree, x_i, scores_i, forest.use_aggregation)
return scores
# TODO: make amf.nopython.partial_fit work in a jitted function, test it and document it
class AMFLearner(object):
"""Base class for Aggregated Mondrian Forest classifier and regressors for online
learning.
Note
----
This class is not intended for end users but for development only.
"""
def __init__(
self,
n_estimators,
step,
loss,
use_aggregation,
split_pure,
n_jobs,
n_samples_increment,
random_state,
verbose,
):
"""Instantiates a `AMFLearner` instance.
Parameters
----------
n_estimators : :obj:`int`
The number of trees in the forest.
step : :obj:`float`
Step-size for the aggregation weights.
loss : :obj:`str`
The loss used for the computation of the aggregation weights.
use_aggregation : :obj:`bool`
Controls if aggregation is used in the trees. It is highly recommended to
leave it as `True`.
split_pure : :obj:`bool`
            Controls whether nodes that contain only samples of the same class should be
            split ("pure" nodes). Default is `False`, namely pure nodes are not split,
            but `True` can sometimes be better.
        n_jobs : :obj:`int`
            Sets the number of threads used to grow the trees in parallel. The default is
            n_jobs=1, namely single-threaded. For now, this parameter has no effect and
only a single thread can be used.
n_samples_increment : :obj:`int`
Sets the minimum amount of memory which is pre-allocated each time extra
memory is required for new samples and new nodes. Decreasing it can slow
down training. If you know that each ``partial_fit`` will be called with
approximately `n` samples, you can set n_samples_increment = `n` if `n` is
larger than the default.
random_state : :obj:`int` or :obj:`None`
Controls the randomness involved in the trees.
verbose : :obj:`bool`, default = `False`
Controls the verbosity when fitting and predicting.
"""
# We will instantiate the numba class when data is passed to
# `partial_fit`, since we need to know about `n_features` among others things
self.no_python = None
self._n_features = None
self.n_estimators = n_estimators
self.step = step
self.loss = loss
self.use_aggregation = use_aggregation
self.split_pure = split_pure
self.n_jobs = n_jobs
self.n_samples_increment = n_samples_increment
self.random_state = random_state
self.verbose = verbose
if os.getenv("NUMBA_DISABLE_JIT", None) == "1":
self._using_numba = False
else:
self._using_numba = True
def partial_fit_helper(self, X, y):
"""Updates the classifier with the given batch of samples.
Parameters
----------
X : :obj:`np.ndarray`, shape=(n_samples, n_features)
Input features matrix.
y : :obj:`np.ndarray`
Input labels vector.
classes : :obj:`None`
Must not be used, only here for backwards compatibility
Returns
-------
output : :obj:`AMFClassifier`
Updated instance of :obj:`AMFClassifier`
"""
        # First, ensure that X and y are C-contiguous and have float32 dtype
X, y = check_X_y(
X,
y,
accept_sparse=False,
accept_large_sparse=False,
dtype="float32",
order="C",
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=True,
estimator=self.__class__.__name__,
)
n_samples, n_features = X.shape
self._extra_y_test(y)
# This is the first call to `partial_fit`, so we need to instantiate
# the no python class
if self.no_python is None:
self._n_features = n_features
self._instantiate_nopython_class()
else:
_, n_features = X.shape
if n_features != self.n_features:
raise ValueError(
"`partial_fit` was first called with n_features=%d while "
"n_features=%d in this call" % (self.n_features, n_features)
)
self._set_random_state()
self._partial_fit(X, y)
self._put_back_random_state()
return self
# TODO: such methods should be private
def predict_helper(self, X):
"""Helper method for the predictions of the given features vectors. This is used
in the ``predict`` and ``predict_proba`` methods of ``AMFRegressor`` and
``AMFClassifier``.
Parameters
----------
X : :obj:`np.ndarray`, shape=(n_samples, n_features)
Input features matrix to predict for.
Returns
-------
output : :obj:`np.ndarray`
Returns the predictions for the input features
"""
X = check_array(
X,
accept_sparse=False,
accept_large_sparse=False,
dtype=["float32"],
order="C",
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=self.__class__.__name__,
)
n_samples, n_features = X.shape
if not self.no_python:
raise RuntimeError(
"You must call `partial_fit` before asking for predictions"
)
else:
if n_features != self.n_features:
raise ValueError(
"`partial_fit` was called with n_features=%d while predictions are "
"asked with n_features=%d" % (self.n_features, n_features)
)
# TODO: this is useless for predictions ?!?
self._set_random_state()
predictions = self._compute_predictions(X)
self._put_back_random_state()
return predictions
def weighted_depth_helper(self, X):
X = check_array(
X,
accept_sparse=False,
accept_large_sparse=False,
dtype=["float32"],
order="C",
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=self.__class__.__name__,
)
n_samples, n_features = X.shape
if not self.no_python:
raise RuntimeError(
"You must call `partial_fit` before asking for weighted depths"
)
else:
if n_features != self.n_features:
raise ValueError(
"`partial_fit` was called with n_features=%d while depths are "
"asked with n_features=%d" % (self.n_features, n_features)
)
weighted_depths = self._compute_weighted_depths(X)
return weighted_depths
@classmethod
def load(cls, filename):
"""Loads a AMF object from file (created with :meth:`save`)
Parameters
----------
filename : :obj:`str`
Filename containing the serialized AMF object
Returns
-------
output : object
Either AMFClassifier or AMFRegressor contained in the file
"""
with open(filename, "rb") as f:
d = pkl.load(f)
return cls._from_dict(d)
def save(self, filename):
"""Saves a AMF object to file using pickle
Parameters
----------
filename : :obj:`str`
Filename containing the serialized AMF object
"""
with open(filename, "wb") as f:
d = self._to_dict()
pkl.dump(d, f)
def _compute_predictions(self, X):
pass
def _extra_y_test(self, y):
pass
def _instantiate_nopython_class(self):
pass
def _set_random_state(self):
# This uses a trick by <NAME>,
# see https://github.com/numba/numba/issues/3249
if self._random_state >= 0:
if self._using_numba:
r = np.random.RandomState(self._random_state)
ptr = _helperlib.rnd_get_np_state_ptr()
ints, index = r.get_state()[1:3]
_helperlib.rnd_set_state(ptr, (index, [int(x) for x in ints]))
self._ptr = ptr
self._r = r
else:
np.random.seed(self._random_state)
def _put_back_random_state(self):
# This uses a trick by <NAME>,
# see https://github.com/numba/numba/issues/3249
if self._random_state >= 0:
if self._using_numba:
ptr = self._ptr
r = self._r
index, ints = _helperlib.rnd_get_state(ptr)
r.set_state(("MT19937", ints, index, 0, 0.0))
def get_nodes_df(self, idx_tree):
import pandas as pd
tree = self.no_python.trees[idx_tree]
nodes = tree.nodes
n_nodes = nodes.n_nodes
index = nodes.index[:n_nodes]
parent = nodes.parent[:n_nodes]
left = nodes.left[:n_nodes]
right = nodes.right[:n_nodes]
feature = nodes.feature[:n_nodes]
threshold = nodes.threshold[:n_nodes]
time = nodes.time[:n_nodes]
depth = nodes.depth[:n_nodes]
memory_range_min = nodes.memory_range_min[:n_nodes]
memory_range_max = nodes.memory_range_max[:n_nodes]
n_samples = nodes.n_samples[:n_nodes]
weight = nodes.weight[:n_nodes]
log_weight_tree = nodes.log_weight_tree[:n_nodes]
is_leaf = nodes.is_leaf[:n_nodes]
# is_memorized = nodes.is_memorized[:n_nodes]
counts = nodes.counts[:n_nodes]
columns = [
"id",
"parent",
"left",
"right",
"depth",
"is_leaf",
"feature",
"threshold",
"time",
"n_samples",
"weight",
"log_weight_tree",
"memory_range_min",
"memory_range_max",
"counts",
]
data = {
"id": index,
"parent": parent,
"left": left,
"right": right,
"depth": depth,
"feature": feature,
"threshold": threshold,
"is_leaf": is_leaf,
"time": time,
"n_samples": n_samples,
"weight": weight,
"log_weight_tree": log_weight_tree,
"memory_range_min": [tuple(t) for t in memory_range_min],
"memory_range_max": [tuple(t) for t in memory_range_max],
"counts": [tuple(t) for t in counts],
}
        df = pd.DataFrame(data, columns=columns)
        return df
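# Hedged usage sketch (not part of the original file): assuming a concrete subclass such
# as AMFClassifier (referenced in the docstrings above) routes `partial_fit` and
# `predict_proba` through `partial_fit_helper` / `predict_helper`, an online-learning
# loop would look roughly like:
#
#     clf = AMFClassifier(n_classes=2, n_estimators=10, random_state=42)  # hypothetical constructor
#     for X_batch, y_batch in batches:                                    # user-provided stream
#         clf.partial_fit(X_batch, y_batch)
#     proba = clf.predict_proba(X_test)
#     nodes_df = clf.get_nodes_df(idx_tree=0)   # inspect the first tree as a DataFrame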
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import operator as op
import seaborn as sns
# http://data8.org/datascience/_modules/datascience/tables.html
#####################
# Frame Manipulation
def relabel(df, OriginalName, NewName):
return df.rename(index=str, columns={OriginalName: NewName})
# https://docs.python.org/3.4/library/operator.html
def where(df, column, value, operation=op.eq):
return pd.DataFrame( df.loc[operation(df.loc[:,column], value) ,:] )
def select(df, *column_or_columns):
table = pd.DataFrame()
for column in column_or_columns:
table[column] = df.loc[:, column].values
return table
def column(df, index_or_label):
"""Return the values of a column as an array.
Args:
label (int or str): The index or label of a column
Returns:
An instance of ``numpy.array``.
Raises:
``ValueError``: When the ``index_or_label`` is not in the table.
"""
if (isinstance(index_or_label, str)):
if (index_or_label not in df.columns):
raise ValueError(
'The column "{}" is not in the table. The table contains '
'these columns: {}'
                .format(index_or_label, ', '.join(df.columns))
)
else:
return df.loc[:, index_or_label].values
if (isinstance(index_or_label, int)):
if (not 0 <= index_or_label < len(df.columns)):
raise ValueError(
'The index {} is not in the table. Only indices between '
'0 and {} are valid'
                .format(index_or_label, len(df.columns) - 1)
)
else:
return df.iloc[:,index_or_label].values
def drop(df, index_or_label):
if (isinstance(index_or_label, str)):
if (index_or_label not in df.columns):
raise ValueError(
'The column "{}" is not in the table. The table contains '
'these columns: {}'
                .format(index_or_label, ', '.join(df.columns))
)
else:
return df.drop(index_or_label, axis=1)
if (isinstance(index_or_label, int)):
if (not 0 <= index_or_label < len(df.columns)):
raise ValueError(
'The index {} is not in the table. Only indices between '
'0 and {} are valid'
                .format(index_or_label, len(df.columns) - 1)
)
else:
return df.drop(index_or_label, axis=0)
return
def row(df, index):
"""Return the values of a row as an array.
Args:
        index (int): The index of a row
Returns:
An instance of ``numpy.array``.
Raises:
``ValueError``: When the ``index_or_label`` is not in the table.
"""
return df.iloc[index,:].values
def cell(df, row, column):
return df.iloc[column, row]
def exclude(df, toexclude_df, column):
the_join = pd.merge(df, toexclude_df, on=[column], how="outer", indicator=True)
    return where(the_join, '_merge', "left_only")
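# Example (illustrative): exclude(df_a, df_b, "id") keeps the merged rows whose "id"
# occurs only in df_a (an anti-join via the merge indicator column "_merge").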
def format(df, num_format=lambda x: '{:,.1f}'.format(x)):
"""Returns a better number formated table. Is Slow
Args:
label (int or str): The index or label of a column
Returns:
pandas dataframe
"""
#TODO: this looks inefficient
def build_formatters_ints(df):
return {
column:lambda x: '{:,}'.format(x)
for column, dtype in df.dtypes.items()
if dtype in [ np.dtype('int64') ]
}
def build_formatters_floats(df):
return {
column:lambda x: '{:.1f}'.format(x)
for column, dtype in df.dtypes.items()
if dtype in [ np.dtype('float64') ]
}
format_int = build_formatters_ints(df)
format_float = build_formatters_floats(df)
style = '<style>.dataframe td { text-align: right; }</style>'
return df.style.set_table_styles(style).format(format_int).format(format_float)
def group(df, column, rename=""):
df_gp = pd.DataFrame(df[column].value_counts())
if rename != "":
return relabel(df_gp,column,rename)
else:
return relabel(df_gp,column,column + "_count")
def count(df, column):
return len( np.unique( df[column] ))
def showna(df):
return sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
def sort(df, col, ascending=True):
return pd.DataFrame(df.sort_values(col, ascending=ascending))
##
def variance(df, column1):
return np.var( pd.DataFrame(df)[column1] )
def median(df, column1):
return np.median( pd.DataFrame(df)[column1] )
def avg(df, column1):
return np.mean( pd.DataFrame(df)[column1] )
def average(df, column1):
    return np.mean( pd.DataFrame(df)[column1] )
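# Example usage of the helpers above (illustrative, with a made-up frame; `group`'s
# output column name depends on the pandas version of value_counts):
#
#     df = pd.DataFrame({"name": ["a", "b", "c"], "val": [1, 2, 2]})
#     where(df, "val", 2)               # rows where val == 2
#     where(df, "val", 1, op.gt)        # rows where val > 1
#     select(df, "name")                # single-column copy
#     sort(df, "val", ascending=False)  # sorted copy
#     avg(df, "val"), median(df, "val"), variance(df, "val")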
#! /usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import pandas
import os
from sklearn.cluster import KMeans
# Load in raw data files
county_data_filename = "county_facts.csv" # Census statistics
election_data_filename = "2016_US_County_Level_Presidential_Results.csv" # Election outcomes
county_data = pandas.read_csv(county_data_filename)
import numpy as np
import pandas as pd
import requests  # fetch content from web pages
from requests.exceptions import HTTPError
from bs4 import BeautifulSoup as bs # Scraping webpages
from time import sleep
import json
import re  # library for working with regular expressions (regex)
import string
import unidecode
import nltk
#nltk.download('punkt')
#nltk.download('stopwords')
from nltk.stem import SnowballStemmer
from nltk.tokenize import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from operator import itemgetter
def cleanhtml(raw_html):
cleanr = re.compile('<.*?>|&[.*?]')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
# removes punctuation, accents and line breaks and returns the normalized lower-case text
def clean_text(text):
    text = text.translate(str.maketrans('', '', string.punctuation))  # removes all punctuation: '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
text = text.replace('\n',' ').strip()
text = text.lower()
text = unidecode.unidecode(text)
return text
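# Example (illustrative): accents, punctuation and line breaks are stripped and the
# text is lower-cased, e.g. clean_text("Olá, Mundo!\n") -> "ola mundo"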
def read_stackoverflow_overview(tags=[], tab='Frequent', pages=5):
link = 'https://stackoverflow.com/questions'
selector='question-summary'
if tags:
tags_link = '/tagged/'
pre=''
for t in tags:
tags_link += str(pre) + t
pre = '+or+'
link += tags_link
link += '?tab='+tab
questions_text = ''
soup_selection = []
for page in range(1,pages+1):
page_link = '&page='+str(page)
try:
request = requests.get(link+page_link)
request.raise_for_status()
try:
soup = bs(request.text, 'html.parser')
soup_selection.append(soup.select('.'+selector))
except: print ("Could not transform to soup object by selecting ",selector)
except HTTPError:
print ("Could not download page ", page)
sleep(0.05)
return soup_selection
def questions_overview(questions_overview_raw):
questions_overview = { 'questions':[]}
for soups in questions_overview_raw:
for soup in soups:
title = soup.select_one('.question-hyperlink').getText()
link = 'https://stackoverflow.com'+soup.select_one('.question-hyperlink').get('href')
summary = soup.select_one('.excerpt').getText()
vote_count = soup.select_one('.vote-count-post').getText()
answers_count = soup.select_one('.answered-accepted')
answers_count = re.sub('\D','',answers_count.getText('')) if answers_count else '0'
views = re.sub('views','',soup.select_one('.views').attrs['title'])
views = re.sub(',','',views)
tags = []
for tag in soup.select('.post-tag'): tags.append(tag.getText())
questions_overview['questions'].append({
'title': title,
'link': link,
'summary': summary,
'vote_count': int(vote_count),
'answers_count': int(answers_count),
'views': int(views),
'tags': tags,
'full_question': '',
'best_answer': '',
})
    questions_df = pd.DataFrame(questions_overview['questions'])
import os, glob, gc, time, yaml, shutil, random
import addict
import argparse
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.preprocessing import StandardScaler, LabelBinarizer, LabelEncoder, QuantileTransformer, KBinsDiscretizer
from datasets import (Features, transform_joint, normalize_npnan,
get_feat_cols, get_folds, save_preprocessed)
from datasets import ViralDataset
from utils import create_out_dir
from models import get_model_class
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR
# import apex
# from apex import amp
# import torchsummary
import warnings
warnings.filterwarnings('ignore')
pd.set_option("display.max_colwidth", 100)
| pd.set_option("display.max_rows", 20) | pandas.set_option |
import pandas as pd
import numpy as np
df =pd.read_csv('user49.csv')
dfcp=pd.read_csv('mdbcp.csv')
dfData={'id': dfcp['id'],'avg':dfcp['avg']}
df2=pd.DataFrame(dfData)
#print(type(df['id'][0]))
df=df.set_index('id').join(df2.set_index('id'))
df=df.dropna()
df['ratio']=df['rating']-df['avg']
df=df.drop(columns=['rating','avg'])
print(df.head())
#ratio calculated
dfDir = pd.read_csv('dir.csv')
# coding: utf-8
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import random
import seaborn as sns
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import glob, os
import errno
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
import warnings
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn import svm
from sklearn.model_selection import GridSearchCV
path = r'D:\ml\BAtestdata'
files = glob.glob(os.path.join(path, "*.csv"))
df_ba = []
nr_files = len(files)
print(nr_files)
for i in range(len(files)):
file = files[i]
df_ba.append(pd.read_csv(file,index_col=0))
print(files)
collected_data = pd.read_csv('DatasetThesis.csv',index_col=0)
#Replace the missing values with the median
median = collected_data["Nr. of Competitors"].median()
collected_data["Nr. of Competitors"].fillna(median,inplace=True)
for i in range(nr_files):
df_ba[i]["Nr. of Competitors"].fillna(median,inplace=True)
print(df_ba[i])
#Replace the missing values with the mean
mean = collected_data["Revenue(millions of dollars)"].mean()
collected_data["Revenue(millions of dollars)"].fillna(mean,inplace=True)
#Heatmap of the correlations between all the variable
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(18.5, 10.5)
sns.heatmap(collected_data.corr(),annot=True, fmt=".2f")
#Normalizing the data and dropping some of the columns
target = collected_data['Acquired'].values
sc_X = StandardScaler()
X_train = collected_data.drop(columns=["Acquired","Nr. of articles","Rounds of seeding ","Revenue(millions of dollars)",
"Nr.of employees"],axis=0)
X_train = sc_X.fit_transform(X_train)
ba_target = []
for i in range(len(files)):
ba_target.append(df_ba[i]["Acquired"].values)
df_ba[i] = df_ba[i].drop(columns=["Acquired","Nr. of articles","Rounds of seeding ","Revenue(millions of dollars)",
"Nr.of employees"],axis=0)
y_pred=[]
clf_svc = svm.SVC()
clf_rf = RandomForestClassifier()
lr = LogisticRegression()
best_models=[]
models = [
clf_rf,
lr,
clf_svc,
]
for i, model in enumerate(models):
# finding the best hyperparameters for each model using gridsearch
if(model==clf_rf):
grid_search = GridSearchCV(model, param_grid={ 'bootstrap': [True, False],
'criterion': ['gini', 'entropy'], 'n_estimators': [3,10,30,100,300,1000]},
cv=10, scoring='accuracy', return_train_score=True,n_jobs=-1)
grid_search.fit(X_train,target)
best_params= grid_search.best_params_
best_rf = RandomForestClassifier(bootstrap = best_params['bootstrap'],criterion=best_params['criterion'],
n_estimators= best_params['n_estimators'])
print(best_params)
best_models.append(best_rf)
if(model==lr):
grid_search = GridSearchCV(model, param_grid={ 'penalty':["l1","l2"], 'C': [0.001, 0.01, 0.1, 1, 10]}
, cv=10, scoring='accuracy', return_train_score=True,n_jobs=-1)
grid_search.fit(X_train,target)
best_params= grid_search.best_params_
best_lr = LogisticRegression(penalty= best_params['penalty'], C=best_params['C'])
print(best_params)
best_models.append(best_lr)
if(model==clf_svc):
grid_search = GridSearchCV(model, param_grid={ 'decision_function_shape':('ovo','ovr'),
'shrinking':(True,False),'kernel':('linear', 'rbf','poly'), 'C': [0.001, 0.01, 0.1, 1, 10],
'gamma' : [0.001, 0.01, 0.1, 1]}, cv=10, scoring='accuracy', return_train_score=True,n_jobs=-1)
grid_search.fit(X_train,target)
best_params = grid_search.best_params_
best_svc = svm.SVC(decision_function_shape=best_params['decision_function_shape'],
shrinking= best_params['shrinking'],kernel=best_params['kernel'], C= best_params['C'],
gamma = best_params['gamma'])
print(best_params)
best_models.append(best_svc)
kfold = KFold(n_splits=10, random_state=1)
models_table = pd.DataFrame(columns=['Classifier_name', 'train_score', 'vald_score',"ba0_test","ba1_test",
"ba2_test","ba3_test",'ba4_test'])
metrics_table = pd.DataFrame(columns=['Classifier_name', "precision0","recall0","precision1","recall1",
"precision2","recall2","precision3","recall3","precision4","recall4",])
vald_table = pd.DataFrame(columns=['Classifier_name',"vald_precision","vald_recall"])
import pandas as pd
data = pd.read_csv('data/T_UWWTPS.csv')
from os import sep
from numpy.core.fromnumeric import mean
import pandas as pd
import matplotlib.pyplot as plt
import math
from sklearn.cluster import KMeans
X = [7, 3, 1, 5, 1, 7, 8, 5]
Y = [1, 4, 5, 8, 3, 8, 2, 9]
labels = ["x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8"]
kdata = pd.DataFrame({"X": X, "Y": Y}, index=labels)
plt.scatter(kdata.X, kdata.Y)
for i in range(len(kdata.index)):
plt.text(kdata.loc[labels[i], "X"], kdata.loc[labels[i], "Y"], '%s' % (str(labels[i])), size=15, zorder=1)
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
kmeans = KMeans(n_clusters=3, init=kdata.loc[["x1", "x2", "x3"], :]).fit(kdata)
print(kmeans.cluster_centers_)
print(kmeans.labels_)
print(kmeans.inertia_)
separation = 0
distance = lambda x1, x2: math.sqrt(((x1.X - x2.X) ** 2) + ((x1.Y - x2.Y) ** 2))
m = kdata.mean()
for i in list(set(kmeans.labels_)):
mi = kdata.loc[kmeans.labels_ == i, :].mean()
Ci = len(kdata.loc[kmeans.labels_ == i, :].index)
separation += Ci * (distance(m, mi) ** 2)
print(separation)
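# Sanity check added for clarity (standard k-means decomposition, TSS = SSE + SSB):
# the within-cluster SSE reported by KMeans (inertia_) plus the between-cluster
# separation computed above should equal the total sum of squares around the mean.
total_ss = sum(distance(kdata.loc[label, :], m) ** 2 for label in kdata.index)
print("SSE + SSB =", kmeans.inertia_ + separation, "| total SS =", total_ss)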
plt.scatter(kdata.X, kdata.Y, c=kmeans.labels_)
for i in range(len(kdata.index)):
plt.text(kdata.loc[labels[i], "X"], kdata.loc[labels[i], "Y"], '%s' % (str(labels[i])), size=15, zorder=1)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], marker="+", s=169, c=range(3))
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
cdata = | pd.read_csv("./cdata.txt") | pandas.read_csv |
"""Provincial road network loss maps
"""
import os
import sys
from collections import OrderedDict
import geopandas as gpd
import pandas as pd
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
from shapely.geometry import LineString
from vtra.utils import *
def main():
config = load_config()
regions = ['Lao Cai', 'Binh Dinh', 'Thanh Hoa']
plot_set = [
{
'column': 'min_tr_loss',
'title': 'Min Rerouting loss',
'legend_label': "Rerouting Loss (USD/day)",
'divisor': 1,
'significance': 0
},
{
'column': 'max_tr_loss',
'title': 'Max Rerouting loss',
'legend_label': "Rerouting Loss (USD/day)",
'divisor': 1,
'significance': 0
},
{
'column': 'min_netrev',
'title': 'Min Net revenue disrupted',
'legend_label': "Net revenue disrupted ('000 USD/day)",
'divisor': 1000,
'significance': 0
},
{
'column': 'max_netrev',
'title': 'Max Net revenue disrupted',
'legend_label': "Net revenue disrupted ('000 USD/day)",
'divisor': 1000,
'significance': 0
},
{
'column': 'min_econ_impact',
'title': 'Min Economic loss',
'legend_label': "Economic Loss ('000 USD/day)",
'divisor': 1000,
'significance': 0
},
{
'column': 'max_econ_impact',
'title': 'Max Economic loss',
'legend_label': "Economic Loss ('000 USD/day)",
'divisor': 1000,
'significance': 0
},
{
'column': 'min_croptons',
'title': 'Min Daily Crop Tons disrupted',
'legend_label': "Crops transport disrupted (tons/day)",
'divisor': 1,
'significance': 0
},
{
'column': 'max_croptons',
'title': 'Max Daily Crop Tons disrupted',
'legend_label': "Crops transport disrupted (tons/day)",
'divisor': 1,
'significance': 0
}
]
for region in regions:
region_file_path = os.path.join(config['paths']['data'], 'post_processed_networks',
'{}_roads_edges.shp'.format(region.lower().replace(' ', '')))
flow_file_path = os.path.join(config['paths']['output'], 'failure_results','minmax_combined_scenarios',
'single_edge_failures_minmax_{}_5_tons_100_percent_disrupt.csv'.format(region.lower().replace(' ', '')))
region_file = gpd.read_file(region_file_path,encoding='utf-8')
flow_file = pd.read_csv(flow_file_path)
        region_file = pd.merge(region_file,flow_file,how='left', on=['edge_id'])
import sys
import os
import os.path, time
import glob
import datetime
import pandas as pd
import numpy as np
import csv
import featuretools as ft
import pyasx
import pyasx.data.companies
def get_holdings(file):
'''
holdings can come from export or data feed (simple)
'''
simple_csv = False
with open(file, encoding="utf8") as csvfile:
hold = csv.reader(csvfile, delimiter=',', quotechar='|')
line_count = 0
for row in hold:
if line_count == 1:
break
if 'Code' in row:
simple_csv = True
line_count += 1
if simple_csv:
holdings = pd.read_csv(file, header=0)
else:
        holdings = pd.read_csv(file, skiprows=[0, 1, 3], header=0)
# -*- coding: utf-8 -*-
"""
Created on Tue May 21 12:10:32 2019
@author: gh2668
"""
import pandas as pd
import read_attributes_signatures
def read_data():
meta_df = read_attributes_signatures.read_meta()
att_df, sig_df = read_attributes_signatures.seperate_attributes_signatures(meta_df)
knoben = | pd.read_csv("catchment_clusters_with_continoues_climate.csv", index_col=1) | pandas.read_csv |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 13:56, 28/01/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from pathlib import Path
from pandas import DataFrame
import pickle as pkl
from numpy import insert, array, concatenate, reshape
from config import Config
def save_experiment_results_multi(solutions, g_best, g_best_dict, training_info, name_paras, path_results, save_training=True):
## Save results
path_results = f'{path_results}/experiment_results'
Path(path_results).mkdir(parents=True, exist_ok=True)
file_name = f'{path_results}/{name_paras}'
## Save fitness
df1 = DataFrame(g_best)
df1.index.name = "Solution"
df1.to_csv(f'{file_name}-results.csv', header=["Power", "Latency", "Cost"], index=True)
df2 = DataFrame(training_info)
df2.to_csv(f'{file_name}-training_info.csv', index=False)
## Save solution
schedule_object_save_path = open(f'{file_name}-solution.pkl', 'wb')
pkl.dump(solutions, schedule_object_save_path)
schedule_object_save_path.close()
## Save training process
if save_training:
fit_list = array([[0, 0, 0, 0]])
for key, value in g_best_dict.items():
value = insert(reshape(value, (-1, 3)), 0, key, axis=1)
fit_list = concatenate((fit_list, value), axis=0)
fitness_df = DataFrame(fit_list)
fitness_df = fitness_df.iloc[1:]
fitness_df.to_csv(f'{file_name}-training.csv', index=False, header=["Epoch", "Power", "Latency", "Cost"])
def save_visualization_results_multi(solution, name_model, name_paras, path_results):
from utils.visual.scatter import visualize_front_3d, visualize_front_2d, visualize_front_1d
path_png = f'{path_results}/visualization/png'
path_pdf = f'{path_results}/visualization/pdf'
Path(path_png).mkdir(parents=True, exist_ok=True)
Path(path_pdf).mkdir(parents=True, exist_ok=True)
file_name = f'{path_results}/{name_paras}'
fn_3d = f'/{file_name}-3d'
fn_2d_PS = f'/{file_name}-2d-PS'
fn_2d_PM = f'/{file_name}-2d-PM'
fn_2d_SM = f'/{file_name}-2d-SM'
fn_1d_P = f'/{file_name}-2d-P'
fn_1d_S = f'/{file_name}-2d-S'
fn_1d_M = f'/{file_name}-2d-M'
visualize_front_3d([solution], Config.OBJ_NAME_1, [name_model],["red"], ["o"], fn_3d, [path_png, path_pdf], [".png", ".pdf"], True)
visualize_front_2d([solution[:, 0:2]], Config.OBJ_NAME_2, [name_model], ["red"], ["o"], fn_2d_PS, [path_png, path_pdf], [".png", ".pdf"])
visualize_front_2d([solution[:, [0, 2]]], Config.OBJ_NAME_3, [name_model], ["red"], ["o"], fn_2d_PM, [path_png, path_pdf], [".png", ".pdf"])
visualize_front_2d([solution[:, 1:3]], Config.OBJ_NAME_4, [name_model], ["red"], ["o"], fn_2d_SM, [path_png, path_pdf], [".png", ".pdf"])
visualize_front_1d([solution[:, 0]], Config.OBJ_NAME_5, [name_model], ["red"], ["o"], fn_1d_P, [path_png, path_pdf], [".png", ".pdf"])
visualize_front_1d([solution[:, 1]], Config.OBJ_NAME_6, [name_model], ["red"], ["o"], fn_1d_S, [path_png, path_pdf], [".png", ".pdf"])
visualize_front_1d([solution[:, 2]], Config.OBJ_NAME_7, [name_model], ["red"], ["o"], fn_1d_M, [path_png, path_pdf], [".png", ".pdf"])
def save_experiment_results_single(problem, solution, list_fitness, name_paras, time_total, path_results, save_training=True):
from model.fitness import Fitness
from utils.schedule_util import matrix_to_schedule
## Saving fitness
path_results = f'{path_results}/experiment_results'
Path(path_results).mkdir(parents=True, exist_ok=True)
fit_obj = Fitness(problem)
schedule = matrix_to_schedule(problem, solution)
power = fit_obj.calc_power_consumption(schedule)
latency = fit_obj.calc_latency(schedule)
cost = fit_obj.calc_cost(schedule)
fitness = fit_obj.fitness(schedule)
file_name = f'{path_results}/{name_paras}'
experiment_results = array([[power, latency, cost, fitness, time_total]])
df1 = DataFrame(experiment_results)
df1.index.name = "Solution"
df1.to_csv(f'{file_name}-results.csv', header=["Power", "Latency", "Cost", "Fitness", "Time"], index=True)
## Saving model
schedule_object_save_path = open(f'{file_name}-solution.pkl', 'wb')
pkl.dump(schedule, schedule_object_save_path)
schedule_object_save_path.close()
## Saving training process
if save_training:
        fitness_df = DataFrame(list_fitness)
import pandas as pd
import os
from utils.composition import _fractional_composition
def norm_form(formula):
comp = _fractional_composition(formula)
form = ''
for key, value in comp.items():
form += f'{key}{str(value)[0:9]}'
return form
def count_elems(string):
count = 0
switch = 1
for c in string:
if c.isalpha():
count += switch
switch = 0
if c.isnumeric():
switch = 1
return count
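# Worked examples for the helpers above (illustrative; exact decimals depend on
# utils.composition._fractional_composition):
#
#     norm_form("Fe2O3")   -> roughly "Fe0.4O0.6" (normalized fractional-composition string)
#     count_elems("Fe2O3") -> 2 (counts element symbols, i.e. alphabetic runs)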
# %%
if __name__ == '__main__':
print('processing all model predictions and calculating metrics')
print('this will take a few minutes...')
# %%
results_path = 'publication_predictions'
benchmark_path = 'data/benchmark_data'
test_directories = os.listdir(results_path)
benchmark_props = os.listdir(benchmark_path)
benchmark_test_directories = [test for test in test_directories if "benchmark" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for benchmark in benchmark_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts = pd.DataFrame()
models = []
for directory in benchmark_test_directories:
df_train_orig = pd.read_csv(f'{benchmark_path}/{benchmark}/train.csv',
keep_default_na=False, na_values=[''])
df_val = pd.read_csv(f'{benchmark_path}/{benchmark}/val.csv',
keep_default_na=False, na_values=[''])
df_train = pd.concat([df_train_orig, df_val], ignore_index=True)
df_train['formula'] = [norm_form(formula) for formula in df_train['formula']]
df_train.index = df_train['formula']
files = os.listdir(f'{results_path}\{directory}')
file = [file for file in files if benchmark in file and 'test' in file]
if len(file) > 0:
models.append(directory.split('_')[0])
file = file[0]
df = pd.read_csv(f'{results_path}\{directory}\{file}',
keep_default_na=False, na_values=[''])
composition = df['formula']
pred = df['predicted']
act = df['actual']
print(f'processing {benchmark} {models[-1]}')
df_compositions = pd.concat([df_compositions, composition], axis=1)
df_preds = pd.concat([df_preds, pred], axis=1)
df_acts = pd.concat([df_acts, act], axis=1)
n_total = act.count() + df_val.shape[0] + df_train_orig.shape[0]
df_stats.at[benchmark, 'mean_test'] = act.mean()
df_stats.at[benchmark, 'std_test'] = act.std()
df_stats.at[benchmark, 'n_test'] = act.count()
df_stats.at[benchmark, 'mean_train'] = df_train['target'].mean()
df_stats.at[benchmark, 'std_train'] = df_train['target'].std()
df_stats.at[benchmark, 'n_train'] = df_train_orig.shape[0]
df_stats.at[benchmark, 'n_val'] = df_val.shape[0]
df_stats.at[benchmark, 'n_total'] = n_total
df_stats.at[benchmark, 'prop_train'] = df_train_orig.shape[0] / n_total
df_stats.at[benchmark, 'prop_val'] = df_val.shape[0] / n_total
df_stats.at[benchmark, 'prop_test'] = act.count() / n_total
df_compositions.columns = models
df_preds.columns = models
df_acts.columns = models
df_diff = df_preds - df_acts
df_mae = df_diff.abs().mean()
test_maes[benchmark] = df_mae
dataset_results[benchmark] = df_compositions
dataset_preds[benchmark] = df_preds
dataset_acts[benchmark] = df_acts
maes = test_maes.T
model_names = ['roost', 'mat2vec', 'onehot', 'elemnet', 'rf']
out_1 = maes[model_names]
out = pd.concat([out_1, df_stats], axis=1)
df_benchmark = out.copy()
# %%
results_path = 'publication_predictions'
matbench_path = 'data/matbench_cv'
test_directories = os.listdir(results_path)
matbench_props = os.listdir(matbench_path)
matbench_test_directories = [test for test in test_directories if "matbench" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for matbench in matbench_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts = pd.DataFrame()
models = []
for directory in matbench_test_directories:
train_files = os.listdir(f'{matbench_path}/{matbench}')
train_files = [file for file in train_files if 'train' in file]
test_files = os.listdir(f'{results_path}/{directory}')
test_files = [file for file in test_files if matbench in file and 'test' in file]
for i, (train_file, test_file) in enumerate(zip(train_files, test_files)):
df_train_orig = pd.read_csv(f'{matbench_path}/{matbench}/{train_file}',
keep_default_na=False, na_values=[''])
df_val = pd.read_csv(f'{matbench_path}/{matbench}/{train_file.replace("train", "val")}',
keep_default_na=False, na_values=[''])
df_train = pd.concat([df_train_orig, df_val], ignore_index=True)
df_train['formula'] = [norm_form(formula) for formula in df_train['formula']]
df_train.index = df_train['formula']
                if len(test_file) > 0:
                    models.append(directory.split('_')[0] + f'_{i}')
df = pd.read_csv(f'{results_path}\{directory}\{test_file}',
keep_default_na=False, na_values=[''])
df.index = df['formula'].values
composition = df['formula']
pred = df['predicted']
act = df['actual']
print(f'processing {matbench} {models[-1]}')
df_compositions = pd.concat([df_compositions, composition], axis=1)
                    df_preds = pd.concat([df_preds, pred], axis=1)
# -*- coding: utf-8 -*-
import sys
from random import randint
import dash
import dash_core_components as dcc
import dash_html_components as html
from coronadash.dash_components import Col, Row
from coronadash.conf.config import myapp
from coronadash.conf.config import mydash
import pandas as pd
import datetime
from dash.dependencies import Input, Output
#import dash_dangerously_set_inner_html
from collections import OrderedDict
import plotly.graph_objs as go
import random
import requests
from tornado import httpclient
from coronadash.dash_server import app
#
# Setup the embedded Dash App and create the actual dash layout, callbacks, etc.:
# see: _create_app()
#
def _create_app_layout(*args, **kwargs):
'''
Creates the actual dash application and layout
Just put any Dash layout in here.
Documentation and examples: https://dash.plot.ly/
The default route is: /dash which calls the handler/dash.py which creates the app
and renders the pow_dash template.
'''
import os
df = pd.read_csv(os.path.join(os.path.dirname(__file__), "data/2019_ncov_data.csv"))
df.columns = map(str.lower, df.columns)
df["date"] = | pd.to_datetime(df["date"]) | pandas.to_datetime |
import pytest
import pandas as pd
from pathlib import Path
from eobox import sampledata
from eobox.raster import cube
@pytest.fixture
def eocube_input_1(tmpdir):
year = 2008
dataset = sampledata.get_dataset("lsts")
layers_paths = [Path(p) for p in dataset["raster_files"]]
layers_df = pd.Series([p.stem for p in layers_paths]).str.split("_", expand=True) \
.rename({0: "sceneid", 1:"band"}, axis=1)
layers_df["date"] = pd.to_datetime(layers_df.sceneid.str[9:16], format="%Y%j")
layers_df["uname"] = layers_df.sceneid.str[:3] + "_" + layers_df.date.dt.strftime("%Y-%m-%d") + \
"_" + layers_df.band.str[::]
layers_df["path"] = layers_paths
layers_df = layers_df.sort_values(["date", "band"])
layers_df = layers_df.reset_index(drop=True)
layers_df_year = layers_df[(layers_df.date >= str(year)) & (layers_df.date < str(year+1))]
layers_df_year = layers_df_year.reset_index(drop=True)
input_kwargs = {
"df_layers": layers_df_year,
"tmpdir": tmpdir.mkdir("temp_dst_dir-0")
}
return input_kwargs
@pytest.fixture
def eocube_input_onescene(tmpdir):
dataset = sampledata.get_dataset("lsts")
layers_paths = [Path(p) for p in dataset["raster_files"][:4]]
layers_df = pd.Series([p.stem for p in layers_paths]).str.split("_", expand=True) \
.rename({0: "sceneid", 1:"band"}, axis=1)
layers_df["date"] = | pd.to_datetime(layers_df.sceneid.str[9:16], format="%Y%j") | pandas.to_datetime |
import pandas as pd
def read_local_data(data_dir):
    static_vars = pd.read_csv(data_dir + 'static_vars.csv')
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, | Timestamp('2000-01-07 00:00:00') | pandas.Timestamp |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/2 21:08
Desc: ๅ่ฑ้กบ-ๆฐๆฎไธญๅฟ-ๆๆฏ้่ก
http://data.10jqka.com.cn/rank/cxg/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.datasets import get_ths_js
def _get_file_content_ths(file: str = "ths.js") -> str:
"""
่ทๅ JS ๆไปถ็ๅ
ๅฎน
:param file: JS ๆไปถๅ
:type file: str
:return: ๆไปถๅ
ๅฎน
:rtype: str
"""
setting_file_path = get_ths_js(file)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def stock_rank_cxg_ths(symbol: str = "ๅๆๆฐ้ซ") -> pd.DataFrame:
"""
ๅ่ฑ้กบ-ๆฐๆฎไธญๅฟ-ๆๆฏ้่ก-ๅๆฐ้ซ
http://data.10jqka.com.cn/rank/cxg/
:param symbol: choice of {"ๅๆๆฐ้ซ", "ๅๅนดๆฐ้ซ", "ไธๅนดๆฐ้ซ", "ๅๅฒๆฐ้ซ"}
:type symbol: str
:return: ๅๆฐ้ซๆฐๆฎ
:rtype: pandas.DataFrame
"""
symbol_map = {
"ๅๆๆฐ้ซ": "4",
"ๅๅนดๆฐ้ซ": "3",
"ไธๅนดๆฐ้ซ": "2",
"ๅๅฒๆฐ้ซ": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["ๅบๅท", "่ก็ฅจไปฃ็ ", "่ก็ฅจ็ฎ็งฐ", "ๆถจ่ทๅน
", "ๆขๆ็", "ๆๆฐไปท", "ๅๆ้ซ็น", "ๅๆ้ซ็นๆฅๆ"]
big_df["่ก็ฅจไปฃ็ "] = big_df["่ก็ฅจไปฃ็ "].astype(str).str.zfill(6)
big_df["ๆถจ่ทๅน
"] = big_df["ๆถจ่ทๅน
"].str.strip("%")
big_df["ๆขๆ็"] = big_df["ๆขๆ็"].str.strip("%")
big_df["ๅๆ้ซ็นๆฅๆ"] = pd.to_datetime(big_df["ๅๆ้ซ็นๆฅๆ"]).dt.date
big_df["ๆถจ่ทๅน
"] = pd.to_numeric(big_df["ๆถจ่ทๅน
"])
big_df["ๆขๆ็"] = pd.to_numeric(big_df["ๆขๆ็"])
big_df["ๆๆฐไปท"] = pd.to_numeric(big_df["ๆๆฐไปท"])
big_df["ๅๆ้ซ็น"] = pd.to_numeric(big_df["ๅๆ้ซ็น"])
return big_df
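# --- Illustrative usage sketch (assumes network access and the bundled ths.js asset) ---
# Loops over the label values accepted by stock_rank_cxg_ths above and prints the
# shape of each result; wrapped in a function so nothing runs on import.
def _demo_stock_rank_cxg_ths() -> None:
    for choice in ("创月新高", "半年新高", "一年新高", "历史新高"):
        choice_df = stock_rank_cxg_ths(symbol=choice)
        print(choice, choice_df.shape)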
def stock_rank_cxd_ths(symbol: str = "ๅๆๆฐไฝ") -> pd.DataFrame:
"""
ๅ่ฑ้กบ-ๆฐๆฎไธญๅฟ-ๆๆฏ้่ก-ๅๆฐไฝ
http://data.10jqka.com.cn/rank/cxd/
:param symbol: choice of {"ๅๆๆฐไฝ", "ๅๅนดๆฐไฝ", "ไธๅนดๆฐไฝ", "ๅๅฒๆฐไฝ"}
:type symbol: str
:return: ๅๆฐไฝๆฐๆฎ
:rtype: pandas.DataFrame
"""
symbol_map = {
"ๅๆๆฐไฝ": "4",
"ๅๅนดๆฐไฝ": "3",
"ไธๅนดๆฐไฝ": "2",
"ๅๅฒๆฐไฝ": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["ๅบๅท", "่ก็ฅจไปฃ็ ", "่ก็ฅจ็ฎ็งฐ", "ๆถจ่ทๅน
", "ๆขๆ็", "ๆๆฐไปท", "ๅๆไฝ็น", "ๅๆไฝ็นๆฅๆ"]
big_df["่ก็ฅจไปฃ็ "] = big_df["่ก็ฅจไปฃ็ "].astype(str).str.zfill(6)
big_df["ๆถจ่ทๅน
"] = big_df["ๆถจ่ทๅน
"].str.strip("%")
big_df["ๆขๆ็"] = big_df["ๆขๆ็"].str.strip("%")
big_df["ๅๆไฝ็นๆฅๆ"] = pd.to_datetime(big_df["ๅๆไฝ็นๆฅๆ"]).dt.date
big_df["ๆถจ่ทๅน
"] = pd.to_numeric(big_df["ๆถจ่ทๅน
"])
big_df["ๆขๆ็"] = pd.to_numeric(big_df["ๆขๆ็"])
big_df["ๆๆฐไปท"] = pd.to_numeric(big_df["ๆๆฐไปท"])
big_df["ๅๆไฝ็น"] = pd.to_numeric(big_df["ๅๆไฝ็น"])
return big_df
def stock_rank_lxsz_ths() -> pd.DataFrame:
"""
ๅ่ฑ้กบ-ๆฐๆฎไธญๅฟ-ๆๆฏ้่ก-่ฟ็ปญไธๆถจ
http://data.10jqka.com.cn/rank/lxsz/
:return: ่ฟ็ปญไธๆถจ
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
        temp_df = | pd.read_html(r.text, converters={"股票代码": str}) | pandas.read_html |
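# --- Illustrative sketch (separate from the scraper above) ---
# Why converters={"股票代码": str} is passed to pd.read_html: codes such as
# "000001" would otherwise be inferred as integers and lose their leading zeros.
import pandas as pd

def _demo_read_html_converters() -> None:
    html = "<table><tr><th>股票代码</th></tr><tr><td>000001</td></tr></table>"
    as_int = pd.read_html(html)[0]
    as_str = pd.read_html(html, converters={"股票代码": str})[0]
    print(as_int["股票代码"].iloc[0])  # typically 1 (zeros dropped)
    print(as_str["股票代码"].iloc[0])  # "000001"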
from unittest import TestCase
from nose_parameterized import parameterized
import os
import gzip
import pandas as pd
from pandas import read_csv
from pyfolio.utils import to_utc
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pyfolio.risk import (compute_style_factor_exposures,
compute_sector_exposures,
compute_cap_exposures,
compute_volume_exposures)
class RiskTestCase(TestCase):
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
test_pos = to_utc(read_csv(
gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
index_col=0, parse_dates=True))
test_pos.columns = [351, 1419, 1787, 25317, 3321, 3951, 4922, 'cash']
test_txn = to_utc(read_csv(
gzip.open(
__location__ + '/test_data/test_txn.csv.gz'),
index_col=0, parse_dates=True))
test_sectors = to_utc(read_csv(
__location__ + '/test_data/test_sectors.csv',
index_col=0, parse_dates=True))
expected_sectors_longed = to_utc(read_csv(
__location__ + '/test_data/expected_sectors_longed.csv',
index_col=0, parse_dates=True))
expected_sectors_shorted = to_utc(read_csv(
__location__ + '/test_data/expected_sectors_shorted.csv',
index_col=0, parse_dates=True))
expected_sectors_grossed = to_utc(read_csv(
__location__ + '/test_data/expected_sectors_grossed.csv',
index_col=0, parse_dates=True))
test_caps = to_utc(read_csv(
__location__ + '/test_data/test_caps.csv',
index_col=0, parse_dates=True))
expected_caps_longed = to_utc(read_csv(
__location__ + '/test_data/expected_caps_longed.csv',
index_col=0, parse_dates=True))
expected_caps_shorted = to_utc(read_csv(
__location__ + '/test_data/expected_caps_shorted.csv',
index_col=0, parse_dates=True))
expected_caps_grossed = to_utc(read_csv(
__location__ + '/test_data/expected_caps_grossed.csv',
index_col=0, parse_dates=True))
expected_caps_netted = to_utc(read_csv(
__location__ + '/test_data/expected_caps_netted.csv',
index_col=0, parse_dates=True))
test_shares_held = to_utc(read_csv(
__location__ + '/test_data/test_shares_held.csv',
index_col=0, parse_dates=True))
test_volumes = to_utc(read_csv(
__location__ + '/test_data/test_volumes.csv',
index_col=0, parse_dates=True))
expected_volumes = to_utc(read_csv(
__location__ + '/test_data/expected_volumes.csv',
index_col=0, parse_dates=True))
test_dict = {}
styles = ['LT_MOMENTUM', 'LMCAP', 'VLTY', 'MACDSignal']
for style in styles:
df = to_utc(read_csv(
__location__ + '/test_data/test_{}.csv'.format(style),
index_col=0, parse_dates=True))
test_dict.update({style: df})
test_styles = | pd.Panel() | pandas.Panel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 17 17:32:41 2018
@author: brettwang
Daily Open + Volume plot for one cryptocurrency during specific date range
dependency:
beautifulsoup
pandas
"""
from bs4 import BeautifulSoup
import requests
import pandas as pd
#import seaborn as sns
import matplotlib.pyplot as plt
import sys
def getHistoricalData(coin,t_start,t_end):
#t_start = '20170101'
#t_end = '20171231'
#coin = 'waves'
t_start = str(t_start)
t_end = str(t_end)
url = "https://coinmarketcap.com/currencies/"+coin+"/historical-data/?start="+t_start+"&end="+t_end
content = requests.get(url).content
soup = BeautifulSoup(content,'html.parser')
table = soup.find('table', {'class': 'table'})
data = [[td.text.strip() for td in tr.findChildren('td')]
for tr in table.findChildren('tr')]
df = | pd.DataFrame(data) | pandas.DataFrame |
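# --- Illustrative usage sketch (requires network access; the page layout may have changed) ---
# The scraper returns raw table cells as strings, so any numeric work needs an
# explicit conversion step; which row holds the header depends on the page markup.
def _demo_get_historical(coin: str = "waves") -> pd.DataFrame:
    raw = getHistoricalData(coin, "20170101", "20171231")
    print(raw.head())  # inspect what came back before assigning column names
    return raw.apply(pd.to_numeric, errors="ignore")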
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 6 10:10:03 2019
@author: <NAME>
"""
import numpy as np
import pandas as pd
import glob as glob
from tg_set_globalplotting import tg_set_globalplotting
from tg_simulate_behaviour import tg_simulate_behaviour
from tg_suboptimal_goal_choice import tg_suboptimal_goal_choice
from tg_suboptimal_goal_choice_sim import tg_suboptimal_goal_choice_sim
from tg_performance_sim import tg_performance_sim
tg_set_globalplotting(style='frontiers')
dat = | pd.read_csv('../Results/preprocessed_results.csv') | pandas.read_csv |
import os
import datajoint as dj
import numpy as np
import pathlib
from datetime import datetime
import pandas as pd
import uuid
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import io
from PIL import Image
import itertools
from pipeline import experiment, ephys, psth, tracking, lab, histology, ccf, foraging_analysis
from pipeline.plot import behavior_plot, unit_characteristic_plot, unit_psth, histology_plot, PhotostimError, foraging_plot
from pipeline import get_schema_name
from pipeline.plot.util import _plot_with_sem, _jointplot_w_hue
from pipeline.util import _get_trial_event_times
import warnings
warnings.filterwarnings('ignore')
schema = dj.schema(get_schema_name('report'))
os.environ['DJ_SUPPORT_FILEPATH_MANAGEMENT'] = "TRUE"
DEFAULT_REPORT_STORE = {
"protocol": "s3",
"endpoint": "s3.amazonaws.com",
"bucket": "map-report",
"location": "report/v2",
"stage": "./data/report_stage",
"access_key": "",
"secret_key": ""
}
if 'stores' not in dj.config:
dj.config['stores'] = {}
if 'report_store' not in dj.config['stores']:
dj.config['stores']['report_store'] = DEFAULT_REPORT_STORE
report_cfg = dj.config['stores']['report_store']
if report_cfg['protocol'] == 's3':
store_location = (pathlib.Path(report_cfg['bucket'])
/ pathlib.Path(report_cfg['location']))
store_location = 'S3: ' + str(store_location)
else:
store_location = pathlib.Path(report_cfg['location'])
store_stage = pathlib.Path(report_cfg['stage'])
mpl.rcParams['font.size'] = 16
# ============================= SESSION LEVEL ====================================
@schema
class SessionLevelReport(dj.Computed):
definition = """
-> experiment.Session
---
behavior_performance: filepath@report_store
"""
key_source = experiment.Session & experiment.BehaviorTrial & experiment.PhotostimBrainRegion
def make(self, key):
water_res_num, sess_date = get_wr_sessdate(key)
sess_dir = store_stage / water_res_num / sess_date
sess_dir.mkdir(parents=True, exist_ok=True)
# ---- behavior_performance ----
# photostim
photostims = (experiment.Photostim * experiment.PhotostimBrainRegion & key).fetch(as_dict=True,
order_by='stim_brain_area')
fig1, axs = plt.subplots(int(1 + np.ceil(len(photostims) / 3)), 3, figsize=(16, 16))
fig1.subplots_adjust(wspace=0.5)
[a.axis('off') for a in axs.flatten()]
gs = axs.flatten()[0].get_gridspec()
[a.remove() for a in axs.flatten()[:2]]
ax1 = fig1.add_subplot(gs[0, :])
# the plot part
behavior_plot.plot_correct_proportion(key, axs=ax1)
ax1.axis('on')
for ax, stim_key in zip(axs.flatten()[3:], photostims):
stim_loc = ' '.join([stim_key['stim_laterality'], stim_key['stim_brain_area']]).upper()
try:
behavior_plot.plot_photostim_effect(key, stim_key, axs=ax, title=stim_loc)
except ValueError:
ax.remove()
ax.axis('on')
# ---- Save fig and insert ----
fn_prefix = f'{water_res_num}_{sess_date}_'
fig_dict = save_figs((fig1,), ('behavior_performance',), sess_dir, fn_prefix)
plt.close('all')
self.insert1({**key, **fig_dict})
@schema
class SessionLevelCDReport(dj.Computed):
definition = """
-> experiment.Session
---
cd_probe_count: int
coding_direction: filepath@report_store
"""
@property
def key_source(self):
# Only process Session with UnitSelectivity computation fully completed
# - only on probe insertions with RecordableBrainRegion
ks = experiment.Session.aggr(ephys.ProbeInsertion, probe_count='count(*)')
ks = ks - (ephys.ProbeInsertion - ephys.ProbeInsertion.RecordableBrainRegion)
unit = ks.aggr(ephys.Unit & 'unit_quality != "all"', unit_count='count(*)')
sel_unit = ks.aggr(psth.UnitSelectivity, sel_unit_count='count(*)')
return unit * sel_unit & 'unit_count = sel_unit_count'
def make(self, key):
water_res_num, sess_date = get_wr_sessdate(key)
sess_dir = store_stage / water_res_num / sess_date
sess_dir.mkdir(parents=True, exist_ok=True)
# ---- Setup ----
time_period = (-0.4, 0)
probe_keys = (ephys.ProbeInsertion & key).fetch('KEY', order_by='insertion_number')
fig1, axs = plt.subplots(len(probe_keys), len(probe_keys), figsize=(16, 16))
if len(probe_keys) > 1:
[a.axis('off') for a in axs.flatten()]
# ---- Plot Coding Direction per probe ----
probe_proj = {}
for pid, probe in enumerate(probe_keys):
units = ephys.Unit & probe
label = (ephys.ProbeInsertion & probe).aggr(ephys.ProbeInsertion.RecordableBrainRegion.proj(
brain_region='CONCAT(hemisphere, " ", brain_area)'),
brain_regions='GROUP_CONCAT(brain_region SEPARATOR", ")').fetch1('brain_regions')
label = '({}) {}'.format(probe['insertion_number'], label)
_, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
# ---- compute CD projected PSTH ----
_, proj_contra_trial, proj_ipsi_trial, time_stamps, hemi = psth.compute_CD_projected_psth(
units.fetch('KEY'), time_period=time_period)
# ---- save projection results ----
probe_proj[pid] = (proj_contra_trial, proj_ipsi_trial, time_stamps, label, hemi)
# ---- generate fig with CD plot for this probe ----
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
_plot_with_sem(proj_contra_trial, time_stamps, ax=ax, c='b')
_plot_with_sem(proj_ipsi_trial, time_stamps, ax=ax, c='r')
# cosmetic
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('CD projection (a.u.)')
ax.set_xlabel('Time (s)')
ax.set_title(label)
fig.tight_layout()
# ---- plot this fig into the main figure ----
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
axs[pid, pid].imshow(Image.open(buf))
buf.close()
plt.close(fig)
# ---- Plot probe-pair correlation ----
for p1, p2 in itertools.combinations(probe_proj.keys(), r=2):
proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps, label_g1, p1_hemi = probe_proj[p1]
proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps, label_g2, p2_hemi = probe_proj[p2]
labels = [label_g1, label_g2]
# plot trial CD-endpoint correlation
p_start, p_end = time_period
contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(
time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(
time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
if p1_hemi == p2_hemi:
contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(
time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(
time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
else:
contra_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(
time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_contra_trial_g2[:, np.logical_and(
time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
c_df = pd.DataFrame([contra_cdend_1, contra_cdend_2]).T
c_df.columns = labels
c_df['trial-type'] = 'contra'
i_df = | pd.DataFrame([ipsi_cdend_1, ipsi_cdend_2]) | pandas.DataFrame |
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from typing import Tuple, Optional, List
import numpy as np
import pandas as pd
from trace_analysis.record import RecordsInterface
class LatencyBase(metaclass=ABCMeta):
@abstractmethod
def to_records(self) -> RecordsInterface:
pass
def to_dataframe(
self, remove_dropped=False, *, column_names: Optional[List[str]] = None
) -> pd.DataFrame:
records = self.to_records()
df = records.to_dataframe()
if remove_dropped:
df.dropna(inplace=True)
if column_names is not None:
column_names_set = set(column_names)
df_columns_set = set(df.columns)
has_columns = column_names_set & df_columns_set == column_names_set
if has_columns:
return df[column_names]
else:
return | pd.DataFrame(columns=column_names) | pandas.DataFrame |
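# --- Illustrative sketch of the column_names guard above ---
# If every requested column exists the frame is sliced; otherwise an empty frame
# with exactly those columns is returned, which keeps downstream code uniform.
import pandas as pd

def _select_or_empty(df: pd.DataFrame, column_names: list) -> pd.DataFrame:
    if set(column_names) <= set(df.columns):
        return df[column_names]
    return pd.DataFrame(columns=column_names)

def _demo_select_or_empty() -> None:
    example = pd.DataFrame({"callback_start": [0, 1], "callback_end": [1, 2]})
    print(_select_or_empty(example, ["callback_start", "missing"]).empty)  # True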
import os
import glob
import psycopg2
import pandas as pd
import numpy as np
from sql_queries import *
from typing import Union
def _type_converter(data):
"""This is a simple utility method we use for type conversion
Args:
        data (Union[np.float64, np.float32, np.int64, np.int32, object]): the value whose type we want to convert
Returns:
Union[int, float, object]: data converted to Python type
"""
if any([isinstance(data, np.float64), isinstance(data, np.float32)]):
return float(data)
if any([isinstance(data, np.int64), isinstance(data, np.int32)]):
return int(data)
return data
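# --- Illustrative sketch (not part of the ETL flow) ---
# The usual motivation for _type_converter: psycopg2 does not adapt numpy scalar
# types out of the box, and values pulled from a DataFrame row are numpy scalars.
# The song id below is a made-up placeholder.
def _demo_type_converter() -> None:
    values = [np.int64(1969), np.float64(148.03), "example_song_id"]
    converted = [_type_converter(v) for v in values]
    print([type(v).__name__ for v in converted])  # ['int', 'float', 'str']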
def process_song_file(cur, filepath):
"""This method processes a single song file.
Args:
cur (psycopg2.cursor): an instance of Postgres cursor class.
filepath (str): full path of the song file in JSON format.
"""
# open song file
df = pd.read_json(filepath, lines=True)
# insert song record
song_data = df[["song_id", "title", "artist_id", "year", "duration"]].loc[0].values
song_data = [_type_converter(x) for x in song_data]
cur.execute(song_table_insert, song_data)
# insert artist record
artist_data = (
df[
[
"artist_id",
"artist_name",
"artist_location",
"artist_latitude",
"artist_longitude",
]
]
.loc[0]
.values
)
artist_data = [_type_converter(x) for x in artist_data]
cur.execute(artist_table_insert, artist_data)
def process_log_file(cur, filepath):
"""This method processes a single log file.
Args:
cur (psycopg2.cursor): an instance of Postgres cursor class.
filepath (str): full path of the log file in JSON format.
"""
# open log file
df = | pd.read_json(filepath, lines=True) | pandas.read_json |
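# --- Illustrative sketch of a typical follow-up step (not taken from the original file) ---
# Event logs of this shape usually carry a millisecond epoch column named "ts"
# (an assumption here); pd.to_datetime(..., unit="ms") expands it into the
# pieces a time-dimension table needs.
def _demo_time_columns(log_df: pd.DataFrame) -> pd.DataFrame:
    t = pd.to_datetime(log_df["ts"], unit="ms")
    return pd.DataFrame({
        "start_time": t,
        "hour": t.dt.hour,
        "day": t.dt.day,
        "month": t.dt.month,
        "year": t.dt.year,
        "weekday": t.dt.weekday,
    })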
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Compute formal oxidation state from Kirsten's CatKit code
# ---
# ### Import Modules
# +
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import copy
import shutil
import numpy as np
import pandas as pd
pd.set_option("display.max_columns", None)
# pd.set_option('display.max_rows', None)
# pd.options.display.max_colwidth = 100
# #########################################################
from methods import (
get_df_jobs_anal,
get_df_atoms_sorted_ind,
get_df_active_sites,
create_name_str_from_tup,
get_df_atoms_sorted_ind,
get_df_jobs_paths,
get_df_features,
get_df_coord_wrap,
get_df_features_targets,
get_df_slabs_to_run,
)
# from methods_features import original_slab_is_good
# #########################################################
from local_methods import set_formal_oxidation_state, get_connectivity
from local_methods import get_catkit_form_oxid_state_wrap
from local_methods import get_effective_ox_state__test
# -
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
else:
from tqdm import tqdm
verbose = False
verbose = False
# # Read Data
# +
df_jobs_anal = get_df_jobs_anal()
df_jobs_anal_i = df_jobs_anal
df_atoms_sorted_ind = get_df_atoms_sorted_ind()
df_active_sites = get_df_active_sites()
df_slabs_to_run = get_df_slabs_to_run()
df_slabs_to_run = df_slabs_to_run.set_index(["compenv", "slab_id", "att_num"])
df_features_targets = get_df_features_targets()
df_features = get_df_features()
df_atoms_sorted_ind = get_df_atoms_sorted_ind()
df_jobs_paths = get_df_jobs_paths()
# + active=""
#
#
#
# +
df_jobs_anal_done = df_jobs_anal[df_jobs_anal.job_completely_done == True]
df_jobs_anal_i = df_jobs_anal_i[df_jobs_anal_i.job_completely_done == True]
# Selecting *O and *OH systems to process
df_index = df_jobs_anal_i.index.to_frame()
df_index_i = df_index[
df_index.ads.isin(["o", "oh", ])
]
df_jobs_anal_i = df_jobs_anal_i.loc[
df_index_i.index
]
# -
# # Further filtering df_jobs_anal
# +
# #########################################################
indices_to_run = []
# #########################################################
for name_i, row_i in df_jobs_anal_i.iterrows():
# #####################################################
run_row = True
if name_i in df_atoms_sorted_ind.index:
row_atoms_i = df_atoms_sorted_ind.loc[name_i]
# #####################################################
failed_to_sort_i = row_atoms_i.failed_to_sort
# #####################################################
if failed_to_sort_i:
run_row = False
else:
run_row = False
if run_row:
indices_to_run.append(name_i)
# #########################################################
df_jobs_anal_i = df_jobs_anal_i.loc[
indices_to_run
]
# +
df_ind = df_jobs_anal_i.index.to_frame()
df = df_ind
df = df[
# (df["compenv"] == compenv_i) &
# (df["slab_id"] == slab_id_i) &
(df["ads"] == "o") &
# (df["active_site"] == active_site_i) &
# (df["att_num"] == att_num_i) &
# (df[""] == ads_i) &
[True for i in range(len(df))]
]
df_jobs_anal_i = df_jobs_anal_i.loc[
df.index
]
df_jobs_anal_i
# +
# assert False
# +
slab_ids = [
"tofebave_45",
"titawupu_08",
"rudosavu_57",
"filetumi_93",
"ralutiwa_59",
"lilotuta_67",
"bikoradi_95",
"kakalito_08",
"wefakuko_75",
"filetumi_93",
"rudosavu_57",
"filetumi_93",
"titawupu_08",
"wefakuko_75",
"vinamepa_43",
"filetumi_93",
"wesaburu_95",
"rudosavu_57",
"dukavula_34",
"bikoradi_95",
"lilotuta_67",
"lilotuta_67",
"bikoradi_95",
"vinamepa_43",
"ramufalu_44",
"wefakuko_75",
"putarude_21",
"dukavula_34",
"vinamepa_43",
"putarude_21",
"wefakuko_75",
"vinamepa_43",
"fogopemi_28",
"vinamepa_43",
"tofebave_45",
"kakalito_08",
"lilotuta_67",
]
# slab_ids = [
# # "titawupu_08",
# "ralutiwa_59",
# ]
df_ind = df_jobs_anal_i.index.to_frame()
df_jobs_anal_i = df_jobs_anal_i.loc[
df_ind[df_ind.slab_id.isin(slab_ids)].index
]
# +
# ('slac', 'ralutiwa_59', 'o', 31.0, 1, False)
# +
# #########################################################
data_dict_list = []
# #########################################################
iterator = tqdm(df_jobs_anal_i.index, desc="1st loop")
for i_cnt, name_i in enumerate(iterator):
# print(name_i)
# #####################################################
data_dict_i = dict()
# #####################################################
row_i = df_jobs_anal_i.loc[name_i]
# #####################################################
compenv_i = name_i[0]
slab_id_i = name_i[1]
ads_i = name_i[2]
active_site_i = name_i[3]
att_num_i = name_i[4]
# #####################################################
job_id_max_i = row_i.job_id_max
# #####################################################
if verbose:
name_concat_i = "_".join([str(i) for i in list(name_i)])
print(40 * "=")
print(name_concat_i)
print(name_i)
# #####################################################
name_dict_i = dict(zip(
list(df_jobs_anal_i.index.names), list(name_i)))
# #####################################################
row_atoms_i = df_atoms_sorted_ind.loc[name_i]
# #####################################################
atoms_sorted_good_i = row_atoms_i.atoms_sorted_good
# #####################################################
atoms = atoms_sorted_good_i
# #####################################################
row_sites_i = df_active_sites.loc[slab_id_i]
# #####################################################
active_sites_unique_i = row_sites_i.active_sites_unique
# #####################################################
data_dict_i["job_id_max"] = job_id_max_i
if active_site_i != "NaN":
# read_orig_O_df_coord_i = False
active_site_j = active_site_i
# oxid_state_i = get_catkit_form_oxid_state_wrap()
data_out_dict_i = get_catkit_form_oxid_state_wrap(
atoms=atoms,
name=name_i,
active_site=active_site_j,
)
oxid_state_i = data_out_dict_i["form_oxid"]
atoms_out_i = data_out_dict_i["atoms_out"]
neigh_dict_i = data_out_dict_i["neigh_dict"]
# atoms_out_i.write("tmp.traj")
# #################################################
data_dict_j = dict()
# #################################################
data_dict_j["from_oh"] = True
data_dict_j["form_oxid_state__catkit"] = oxid_state_i
data_dict_j["atoms_catkit"] = atoms_out_i
data_dict_j["neigh_dict"] = neigh_dict_i
# #################################################
data_dict_j.update(name_dict_i)
# data_dict_j.update(out_dict)
data_dict_j.update(data_dict_i)
# data_dict_j.update(data_out_dict_i)
# #################################################
data_dict_list.append(data_dict_j)
# #################################################
else:
for active_site_j in active_sites_unique_i:
if verbose:
print("active_site_j:", active_site_j)
# oxid_state_i = get_catkit_form_oxid_state_wrap(
data_out_dict_i = get_catkit_form_oxid_state_wrap(
atoms=atoms,
name=name_i,
active_site=active_site_j,
)
oxid_state_i = data_out_dict_i["form_oxid"]
atoms_out_i = data_out_dict_i["atoms_out"]
neigh_dict_i = data_out_dict_i["neigh_dict"]
# atoms_out_i.write("tmp.traj")
# #############################################
data_dict_j = dict()
# #############################################
data_dict_j["from_oh"] = False
data_dict_j["form_oxid_state__catkit"] = oxid_state_i
data_dict_j["active_site"] = active_site_j
data_dict_j["atoms_catkit"] = atoms_out_i
data_dict_j["neigh_dict"] = neigh_dict_i
# #############################################
name_dict_i_cpy = copy.deepcopy(name_dict_i)
name_dict_i_cpy.pop("active_site")
data_dict_j.update(name_dict_i_cpy)
# data_dict_j.update(out_dict)
data_dict_j.update(data_dict_i)
# data_dict_j.update(data_out_dict_i)
# #############################################
data_dict_list.append(data_dict_j)
# #############################################
# #########################################################
df_eff_ox = pd.DataFrame(data_dict_list)
df_eff_ox = df_eff_ox.set_index(["compenv", "slab_id", "ads", "active_site", "att_num", "from_oh", ])
# #########################################################
# +
shared_indices = df_features.index.intersection(
df_eff_ox.index
).unique()
data_dict_list = []
for index_i in shared_indices:
data_dict_i = dict()
# #####################################################
row_feat_i = df_features.loc[index_i]
# #####################################################
eff_oxid_state__mine = row_feat_i["features"]["eff_oxid_state"]
# #####################################################
# #####################################################
row_ox_i = df_eff_ox.loc[index_i]
# #####################################################
eff_oxid_state__catkit = row_ox_i["form_oxid_state__catkit"]
job_id_i = row_ox_i.job_id_max
atoms_catkit_i = row_ox_i.atoms_catkit
neigh_dict__catkit_i = row_ox_i["neigh_dict"]
# #####################################################
index_slabs_to_run = (index_i[0], index_i[1], index_i[4], )
if index_slabs_to_run in df_slabs_to_run.index:
row_slab_i = df_slabs_to_run.loc[
index_slabs_to_run
]
status_i = row_slab_i.status
else:
status_i = "NaN"
# row_slab_i = df_slabs_to_run.loc[
# # (name_i[0], name_i[1], name_i[4], )
# (index_i[0], index_i[1], index_i[4], )
# ]
# status_i = row_slab_i.status
if not np.isnan(eff_oxid_state__mine) and not np.isnan(eff_oxid_state__catkit):
isclose_i = np.isclose(
eff_oxid_state__mine,
eff_oxid_state__catkit,
atol=1e-05,
equal_nan=True,
)
if not isclose_i:
if True:
# if status_i == "ok":
print(
status_i,
" | ",
index_i,
": ",
np.round(eff_oxid_state__mine, 3),
" | ",
np.round(eff_oxid_state__catkit, 3),
sep="")
# #############################################
data_dict_i["status"] = status_i
data_dict_i["index"] = index_i
data_dict_i["compenv"] = index_i[0]
data_dict_i["slab_id"] = index_i[1]
data_dict_i["ads"] = index_i[2]
data_dict_i["active_site"] = index_i[3]
data_dict_i["att_num"] = index_i[4]
data_dict_i["from_oh"] = index_i[5]
data_dict_i["job_id"] = job_id_i
data_dict_i["atoms_catkit"] = atoms_catkit_i
data_dict_i["neigh_dict__catkit"] = neigh_dict__catkit_i
# #############################################
data_dict_list.append(data_dict_i)
# #############################################
# #########################################################
df_oxi_comp = | pd.DataFrame(data_dict_list) | pandas.DataFrame |
'''
Library for Google Sheets functions.
'''
import configparser
import os
import pickle
import logging
import re
import math
from string import ascii_uppercase
from typing import List
import pandas as pd
import numpy as np
from constants import rgx_age, rgx_sex, rgx_date, rgx_lives_in_wuhan, date_columns, column_to_type
from spreadsheet import GoogleSheet
def get_GoogleSheets(config: configparser.ConfigParser) -> List[GoogleSheet]:
'''
Loop through different sheets in config file, and init objects.
Args :
config (ConfigParser) : configuration
Returns :
values (list) : list of GoogleSheet objects.
'''
# Fetch all sections in config referring to sheets.
sheets = []
pattern = r'^SHEET\d*$'
sections = config.sections()
for s in sections:
if re.match(pattern, s):
id_ = config[s]['ID']
sid = config[s]['SID']
name = config[s]['NAME']
googlesheet = GoogleSheet(sid, name, id_,
config['SHEETS'].get("TOKEN"),
config['SHEETS'].get('CREDENTIALS'),
config['SHEETS'].get('IS_SERVICE_ACCOUNT'))
sheets.append(googlesheet)
return sheets
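# --- Illustrative sketch of the config layout get_GoogleSheets expects ---
# Section names must match ^SHEET\d*$; the concrete values below are placeholders,
# not real credentials or sheet ids.
EXAMPLE_CONFIG = """
[SHEETS]
TOKEN = token.pickle
CREDENTIALS = credentials.json
IS_SERVICE_ACCOUNT = False

[SHEET1]
ID = <your-google-sheet-id>
SID = 0
NAME = outside_Hubei
"""

def _demo_sheet_sections() -> list:
    cfg = configparser.ConfigParser()
    cfg.read_string(EXAMPLE_CONFIG)
    return [s for s in cfg.sections() if re.match(r'^SHEET\d*$', s)]  # -> ['SHEET1']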
def values2dataframe(values: list) -> pd.DataFrame:
'''
Convert raw values as retrieved from read_values to a pandas dataframe.
Adds empty values so that all lists have the same length.
Args:
values (list) : list of lists with values from read_values
Returns:
data (pd.DataFrame): same data with stripped column names.
'''
columns = values[0]
for i, c in enumerate(columns):
# added when column name disappeared.
if c.strip() == '' and columns[i-1] == 'province':
columns[i] = 'country'
ncols = len(columns)
data = values[1:]
for d in data:
if len(d) < ncols:
extension = ['']*(ncols-len(d))
d.extend(extension)
data = | pd.DataFrame(data=data, columns=columns) | pandas.DataFrame |
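# --- Illustrative sketch (exercises values2dataframe above) ---
# Rows coming back from the Sheets API are ragged because trailing empty cells are
# omitted; the helper pads them and renames the blank column that follows "province".
def _demo_values2dataframe() -> pd.DataFrame:
    demo_values = [
        ["ID", "age", "sex", "city", "province", ""],
        ["1", "45", "male", "Wuhan", "Hubei", "China"],
        ["2", "", "female", "Shanghai"],
    ]
    demo_df = values2dataframe([row[:] for row in demo_values])
    # Columns become ['ID', 'age', 'sex', 'city', 'province', 'country']; shape (2, 6).
    return demo_df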
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
        exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
                   "'2011-01-01 10:00:00+00:00', 'NaT'], "
                   "dtype='datetime64[ns, UTC]', freq=None)")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe this could be allowed in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = | pd.period_range('2014-05-01', '2014-05-15', freq='D') | pandas.period_range |
import pandas as pd
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from torch.optim import Adagrad
def run(dim, ds, epochs, attempts, lrs, reg_coef):
losses = | pd.DataFrame(columns=['lr', 'epoch', 'attempt', 'loss']) | pandas.DataFrame |
# Author: <NAME>
# github: sehovaclj
# code that uses a regular RNN to forecast energy consumption. Refer to Journal paper for more details
# importing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import init
import random
import time
import math
#from twilio.rest import Client
from tempfile import TemporaryFile
#########################################################################################
# building the S2S model
class SModel(nn.Module):
def __init__(self, cell_type, input_size, hidden_size, use_cuda, pred_type, pred_length):
super(SModel, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.cell_type = cell_type
if self.cell_type not in ['rnn', 'gru', 'lstm']:
raise ValueError(self.cell_type, " is not an appropriate cell type. Please select one of rnn, gru, or lstm.")
if self.cell_type == 'rnn':
self.rnncell = nn.RNNCell(self.input_size, self.hidden_size)
if self.cell_type == 'gru':
self.rnncell = nn.GRUCell(self.input_size, self.hidden_size)
if self.cell_type == 'lstm':
self.rnncell = nn.LSTMCell(self.input_size, self.hidden_size)
self.lin_usage = nn.Linear(self.hidden_size, 1)
self.use_cuda = use_cuda
self.pred_length = pred_length
self.pred_type = pred_type
self.init()
# VERY IMPORTANT INIT PARAMS FUNCTIONS***
# function to intialize weight parameters
def init(self):
if self.cell_type == 'rnn' or self.cell_type == 'gru':
#j = []
for p in self.parameters():
if p.dim() > 1:
init.orthogonal_(p.data, gain=1.0)
#j.append(p.data)
if p.dim() == 1:
init.constant_(p.data, 0.0)
#j.append(p.data)
elif self.cell_type == 'lstm':
#j = []
for p in self.parameters():
if p.dim() > 1:
init.orthogonal_(p.data, gain=1.0)
#j.append(p.data)
if p.dim() == 1:
init.constant_(p.data, 0.0)
init.constant_(p.data[self.hidden_size:2*self.hidden_size], 1.0)
#j.append(p.data)
#return j
def forward(self, x, pred_type, pred_length):
# encoder forward function
self.pred_type = pred_type
self.pred_length = pred_length
preds = []
# for rnn and gru
if self.cell_type == 'rnn' or self.cell_type == 'gru':
h = torch.zeros(x.shape[0], self.hidden_size)
if self.use_cuda:
h = h.cuda()
if self.pred_type == 'full':
for T in range(x.shape[1]):
h = self.rnncell(x[:, T, :], h)
pred_usage = self.lin_usage(h)
preds.append(pred_usage.unsqueeze(1))
preds = torch.cat(preds, 1)
elif self.pred_type == 'partial':
for T in range(x.shape[1]):
h = self.rnncell(x[:, T, :], h)
if T >= (x.shape[1] - self.pred_length):
pred_usage = self.lin_usage(h)
preds.append(pred_usage.unsqueeze(1))
preds = torch.cat(preds, 1)
# for lstm
elif self.cell_type == 'lstm':
h0 = torch.zeros(x.shape[0], self.hidden_size)
c0 = torch.zeros(x.shape[0], self.hidden_size)
if self.use_cuda:
h0 = h0.cuda()
c0 = c0.cuda()
h = (h0, c0)
if self.pred_type == 'full':
for T in range(x.shape[1]):
h = self.rnncell(x[:, T, :], h)
pred_usage = self.lin_usage(h[0])
preds.append(pred_usage.unsqueeze(1))
preds = torch.cat(preds, 1)
elif self.pred_type == 'partial':
for T in range(x.shape[1]):
h = self.rnncell(x[:, T, :], h)
if T >= (x.shape[1] - self.pred_length):
pred_usage = self.lin_usage(h[0])
preds.append(pred_usage.unsqueeze(1))
preds = torch.cat(preds, 1)
return preds
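# --- Illustrative usage sketch (not part of the original script) ---
# A minimal forward pass through SModel on CPU. The batch size, window length,
# and feature count below are made-up placeholders; only the constructor and
# forward signatures defined above are relied upon. Defined as a helper so
# nothing executes on import.
def _example_smodel_forward():
    model = SModel(cell_type='gru', input_size=8, hidden_size=16,
                   use_cuda=False, pred_type='partial', pred_length=4)
    x = torch.randn(32, 24, 8)  # (batch, source window length, input features)
    preds = model(x, pred_type='partial', pred_length=4)
    # 'partial' keeps one prediction per step for the last pred_length steps,
    # so preds has shape (32, 4, 1); 'full' would return (32, 24, 1).
    return preds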
#################################################################################################################################################
# main function
def main(seed, cuda, cell_type, window_source_size, window_target_size, pred_type, pred_length, epochs, batch_size, hs):
t0 = time.time()
# seed == given seed
np.random.seed(seed)
torch.manual_seed(seed)
print("Loading dataset...")
d = np.loadtxt("./Anonymous_dataset.csv", delimiter=",", skiprows=1, dtype=str)
dataset = d[:, 4:].astype(np.float32)
dataset = | pd.DataFrame(dataset) | pandas.DataFrame |
from functools import partial
from collections import defaultdict
import json
import warnings
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from ....utils import getargspec
from ..utils import _get_pyarrow_dtypes, _meta_from_dtypes
from ...utils import clear_known_categories
from ....core import flatten
from dask import delayed
from .utils import (
_parse_pandas_metadata,
_normalize_index_columns,
Engine,
_analyze_paths,
)
preserve_ind_supported = pa.__version__ >= LooseVersion("0.15.0")
schema_field_supported = pa.__version__ >= LooseVersion("0.15.0")
#
# Private Helper Functions
#
def _append_row_groups(metadata, md):
try:
metadata.append_row_groups(md)
except RuntimeError as err:
if "requires equal schemas" in str(err):
raise RuntimeError(
"Schemas are inconsistent, try using "
'`to_parquet(..., schema="infer")`, or pass an explicit '
"pyarrow schema."
)
else:
raise err
def _write_partitioned(
table, root_path, filename, partition_cols, fs, index_cols=(), **kwargs
):
"""Write table to a partitioned dataset with pyarrow.
Logic copied from pyarrow.parquet.
(arrow/python/pyarrow/parquet.py::write_to_dataset)
TODO: Remove this in favor of pyarrow's `write_to_dataset`
once ARROW-8244 is addressed.
"""
fs.mkdirs(root_path, exist_ok=True)
df = table.to_pandas(ignore_metadata=True)
index_cols = list(index_cols) if index_cols else []
preserve_index = False
if index_cols and preserve_ind_supported:
df.set_index(index_cols, inplace=True)
preserve_index = True
partition_keys = [df[col] for col in partition_cols]
data_df = df.drop(partition_cols, axis="columns")
data_cols = df.columns.drop(partition_cols)
if len(data_cols) == 0 and not index_cols:
raise ValueError("No data left to save outside partition columns")
subschema = table.schema
for col in table.schema.names:
if col in partition_cols:
subschema = subschema.remove(subschema.get_field_index(col))
md_list = []
for keys, subgroup in data_df.groupby(partition_keys):
if not isinstance(keys, tuple):
keys = (keys,)
subdir = fs.sep.join(
[
"{colname}={value}".format(colname=name, value=val)
for name, val in zip(partition_cols, keys)
]
)
subtable = pa.Table.from_pandas(
subgroup, preserve_index=preserve_index, schema=subschema, safe=False
)
prefix = fs.sep.join([root_path, subdir])
fs.mkdirs(prefix, exist_ok=True)
full_path = fs.sep.join([prefix, filename])
with fs.open(full_path, "wb") as f:
pq.write_table(subtable, f, metadata_collector=md_list, **kwargs)
md_list[-1].set_file_path(fs.sep.join([subdir, filename]))
return md_list
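# Illustrative sketch only (not part of the original engine): one way the helper
# above might be exercised directly. It assumes a local fsspec filesystem, a
# temporary output path, and a small in-memory table; in practice dask's
# to_parquet supplies the filesystem, paths, and writer kwargs.
def _example_write_partitioned():
    import pandas as pd
    import fsspec
    df = pd.DataFrame({"key": ["a", "a", "b"], "value": [1, 2, 3]})
    table = pa.Table.from_pandas(df, preserve_index=False)
    fs = fsspec.filesystem("file")
    md_list = _write_partitioned(
        table, "/tmp/example_dataset", "part.0.parquet", ["key"], fs
    )
    # One pyarrow FileMetaData entry is collected per partition file written.
    return md_list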
def _index_in_schema(index, schema):
if index and schema is not None:
# Make sure all index columns are in user-defined schema
return len(set(index).intersection(schema.names)) == len(index)
elif index:
return True # Schema is not user-specified, all good
else:
return False # No index to check
def _get_dataset_object(paths, fs, filters, dataset_kwargs):
"""Generate a ParquetDataset object"""
kwargs = dataset_kwargs.copy()
if "validate_schema" not in kwargs:
kwargs["validate_schema"] = False
if len(paths) > 1:
# This is a list of files
base, fns = _analyze_paths(paths, fs)
proxy_metadata = None
if "_metadata" in fns:
# We have a _metadata file. PyArrow cannot handle
# "_metadata" when `paths` is a list. So, we shuld
# open "_metadata" separately.
paths.remove(fs.sep.join([base, "_metadata"]))
fns.remove("_metadata")
with fs.open(fs.sep.join([base, "_metadata"]), mode="rb") as fil:
proxy_metadata = pq.ParquetFile(fil).metadata
# Create our dataset from the list of data files.
# Note #1: that this will not parse all the files (yet)
# Note #2: Cannot pass filters for legacy pyarrow API (see issue#6512).
# We can handle partitions + filtering for list input after
# adopting new pyarrow.dataset API.
dataset = pq.ParquetDataset(paths, filesystem=fs, **kwargs)
if proxy_metadata:
dataset.metadata = proxy_metadata
elif fs.isdir(paths[0]):
# This is a directory. We can let pyarrow do its thing.
# Note: In the future, it may be best to avoid listing the
# directory if we can get away with checking for the
# existence of _metadata. Listing may be much more
# expensive in storage systems like S3.
allpaths = fs.glob(paths[0] + fs.sep + "*")
base, fns = _analyze_paths(allpaths, fs)
dataset = pq.ParquetDataset(paths[0], filesystem=fs, filters=filters, **kwargs)
else:
# This is a single file. No danger in gathering statistics
# and/or splitting row-groups without a "_metadata" file
base = paths[0]
fns = [None]
dataset = pq.ParquetDataset(paths[0], filesystem=fs, **kwargs)
return dataset, base, fns
def _gather_metadata(
paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs
):
"""Gather parquet metadata into a single data structure.
Use _metadata or aggregate footer metadata into a single
object. Also, collect other information necessary for
parquet-to-ddf mapping (e.g. schema, partition_info).
"""
# Step 1: Create a ParquetDataset object
dataset, base, fns = _get_dataset_object(paths, fs, filters, dataset_kwargs)
if fns == [None]:
# This is a single file. No danger in gathering statistics
# and/or splitting row-groups without a "_metadata" file
if gather_statistics is None:
gather_statistics = True
if split_row_groups is None:
split_row_groups = True
# Step 2: Construct necessary (parquet) partitioning information
partition_info = {"partitions": None, "partition_keys": {}, "partition_names": []}
fn_partitioned = False
if dataset.partitions is not None:
fn_partitioned = True
partition_info["partition_names"] = [
n for n in dataset.partitions.partition_names if n is not None
]
partition_info["partitions"] = dataset.partitions
for piece in dataset.pieces:
partition_info["partition_keys"][piece.path] = piece.partition_keys
# Step 3: Construct a single `metadata` object. We can
# directly use dataset.metadata if it is available.
# Otherwise, if `gather_statistics` or `split_row_groups`,
    # we need to gather the footer metadata manually
metadata = None
if dataset.metadata:
# We have a _metadata file.
# PyArrow already did the work for us
schema = dataset.metadata.schema.to_arrow_schema()
if gather_statistics is None:
gather_statistics = True
if split_row_groups is None:
split_row_groups = True
return (
schema,
dataset.metadata,
base,
partition_info,
split_row_groups,
gather_statistics,
)
else:
# No _metadata file.
# May need to collect footer metadata manually
if dataset.schema is not None:
schema = dataset.schema.to_arrow_schema()
else:
schema = None
if gather_statistics is None:
gather_statistics = False
if split_row_groups is None:
split_row_groups = False
metadata = None
if not (split_row_groups or gather_statistics):
# Don't need to construct real metadata if
# we are not gathering statistics or splitting
# by row-group
metadata = [p.path for p in dataset.pieces]
if schema is None:
schema = dataset.pieces[0].get_metadata().schema.to_arrow_schema()
return (
schema,
metadata,
base,
partition_info,
split_row_groups,
gather_statistics,
)
# We have not detected a _metadata file, and the user has specified
# that they want to split by row-group and/or gather statistics.
# This is the only case where we MUST scan all files to collect
# metadata.
for piece, fn in zip(dataset.pieces, fns):
md = piece.get_metadata()
if schema is None:
schema = md.schema.to_arrow_schema()
if fn_partitioned:
md.set_file_path(piece.path.replace(base + fs.sep, ""))
elif fn:
md.set_file_path(fn)
if metadata:
_append_row_groups(metadata, md)
else:
metadata = md
return (
schema,
metadata,
base,
partition_info,
split_row_groups,
gather_statistics,
)
def _generate_dd_meta(schema, index, categories, partition_info):
partition_obj = partition_info["partitions"]
partitions = partition_info["partition_names"]
columns = None
has_pandas_metadata = schema.metadata is not None and b"pandas" in schema.metadata
if has_pandas_metadata:
pandas_metadata = json.loads(schema.metadata[b"pandas"].decode("utf8"))
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(pandas_metadata)
if categories is None:
categories = []
for col in pandas_metadata["columns"]:
if (col["pandas_type"] == "categorical") and (
col["name"] not in categories
):
categories.append(col["name"])
else:
# No pandas metadata implies no index, unless selected by the user
index_names = []
column_names = schema.names
storage_name_mapping = {k: k for k in column_names}
column_index_names = [None]
if index is None and index_names:
index = index_names
if set(column_names).intersection(partitions):
raise ValueError(
"partition(s) should not exist in columns.\n"
"categories: {} | partitions: {}".format(column_names, partitions)
)
column_names, index_names = _normalize_index_columns(
columns, column_names + partitions, index, index_names
)
all_columns = index_names + column_names
# Check that categories are included in columns
if categories and not set(categories).intersection(all_columns):
raise ValueError(
"categories not in available columns.\n"
"categories: {} | columns: {}".format(categories, list(all_columns))
)
dtypes = _get_pyarrow_dtypes(schema, categories)
dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}
index_cols = index or ()
meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)
meta = clear_known_categories(meta, cols=categories)
if partition_obj:
for partition in partition_obj:
if isinstance(index, list) and partition.name == index[0]:
# Index from directory structure
meta.index = pd.CategoricalIndex(
categories=partition.keys, name=index[0]
)
elif partition.name == meta.index.name:
# Index created from a categorical column
meta.index = pd.CategoricalIndex(
categories=partition.keys, name=meta.index.name
)
elif partition.name in meta.columns:
meta[partition.name] = pd.Series(
pd.Categorical(categories=partition.keys, values=[]),
index=meta.index,
)
return meta, index_cols, categories, index
def _aggregate_stats(
file_path, file_row_group_stats, file_row_group_column_stats, stat_col_indices
):
"""Utility to aggregate the statistics for N row-groups
into a single dictionary.
"""
if len(file_row_group_stats) < 1:
# Empty statistics
return {}
elif len(file_row_group_column_stats) == 0:
assert len(file_row_group_stats) == 1
return file_row_group_stats[0]
else:
# Note: It would be better to avoid df_rgs and df_cols
# construction altogether. It makes it fast to aggregate
# the statistics for many row groups, but isn't
# worthwhile for a small number of row groups.
if len(file_row_group_stats) > 1:
df_rgs = pd.DataFrame(file_row_group_stats)
s = {
"file_path_0": file_path,
"num-rows": df_rgs["num-rows"].sum(),
"total_byte_size": df_rgs["total_byte_size"].sum(),
"columns": [],
}
else:
s = {
"file_path_0": file_path,
"num-rows": file_row_group_stats[0]["num-rows"],
"total_byte_size": file_row_group_stats[0]["total_byte_size"],
"columns": [],
}
df_cols = None
if len(file_row_group_column_stats) > 1:
df_cols = pd.DataFrame(file_row_group_column_stats)
for ind, name in enumerate(stat_col_indices):
i = ind * 3
if df_cols is None:
s["columns"].append(
{
"name": name,
"min": file_row_group_column_stats[0][i],
"max": file_row_group_column_stats[0][i + 1],
"null_count": file_row_group_column_stats[0][i + 2],
}
)
else:
s["columns"].append(
{
"name": name,
"min": df_cols.iloc[:, i].min(),
"max": df_cols.iloc[:, i + 1].max(),
"null_count": df_cols.iloc[:, i + 2].sum(),
}
)
return s
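# Illustrative sketch only (not part of the original engine): the expected shape
# of _aggregate_stats inputs and output for one file with two row-groups and a
# single statistics column ("x" at column index 0). All values are made up.
def _example_aggregate_stats():
    rg_stats = [
        {"num-rows": 10, "total_byte_size": 100},
        {"num-rows": 5, "total_byte_size": 60},
    ]
    rg_column_stats = [(0, 9, 0), (10, 14, 1)]  # (min, max, null_count) per row-group
    agg = _aggregate_stats("part.0.parquet", rg_stats, rg_column_stats, {"x": 0})
    # agg["num-rows"] == 15; agg["columns"][0] reports min=0, max=14, null_count=1.
    return agg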
def _process_metadata(
metadata, single_rg_parts, gather_statistics, stat_col_indices, no_filters
):
# Get the number of row groups per file
file_row_groups = defaultdict(list)
file_row_group_stats = defaultdict(list)
file_row_group_column_stats = defaultdict(list)
cmax_last = {}
for rg in range(metadata.num_row_groups):
row_group = metadata.row_group(rg)
fpath = row_group.column(0).file_path
if fpath is None:
raise ValueError(
"Global metadata structure is missing a file_path string. "
"If the dataset includes a _metadata file, that file may "
"have one or more missing file_path fields."
)
if file_row_groups[fpath]:
file_row_groups[fpath].append(file_row_groups[fpath][-1] + 1)
else:
file_row_groups[fpath].append(0)
if gather_statistics:
if single_rg_parts:
s = {
"file_path_0": fpath,
"num-rows": row_group.num_rows,
"total_byte_size": row_group.total_byte_size,
"columns": [],
}
else:
s = {
"num-rows": row_group.num_rows,
"total_byte_size": row_group.total_byte_size,
}
cstats = []
for name, i in stat_col_indices.items():
column = row_group.column(i)
if column.statistics:
cmin = column.statistics.min
cmax = column.statistics.max
cnull = column.statistics.null_count
last = cmax_last.get(name, None)
if no_filters:
# Only think about bailing if we don't need
# stats for filtering
if cmin is None or (last and cmin < last):
# We are collecting statistics for divisions
# only (no filters) - Column isn't sorted, or
# we have an all-null partition, so lets bail.
#
# Note: This assumes ascending order.
#
gather_statistics = False
file_row_group_stats = {}
file_row_group_column_stats = {}
break
if single_rg_parts:
to_ts = column.statistics.logical_type.type == "TIMESTAMP"
s["columns"].append(
{
"name": name,
"min": cmin if not to_ts else pd.Timestamp(cmin),
"max": cmax if not to_ts else pd.Timestamp(cmax),
"null_count": cnull,
}
)
else:
cstats += [cmin, cmax, cnull]
cmax_last[name] = cmax
else:
if no_filters and column.num_values > 0:
# We are collecting statistics for divisions
# only (no filters) - Lets bail.
gather_statistics = False
file_row_group_stats = {}
file_row_group_column_stats = {}
break
if single_rg_parts:
s["columns"].append({"name": name})
else:
cstats += [None, None, None]
if gather_statistics:
file_row_group_stats[fpath].append(s)
if not single_rg_parts:
file_row_group_column_stats[fpath].append(tuple(cstats))
return (
file_row_groups,
file_row_group_stats,
file_row_group_column_stats,
gather_statistics,
)
def _construct_parts(
fs,
metadata,
schema,
filters,
index_cols,
data_path,
partition_info,
categories,
split_row_groups,
gather_statistics,
):
"""Construct ``parts`` for ddf construction
Use metadata (along with other data) to define a tuple
for each ddf partition. Also gather statistics if
``gather_statistics=True``, and other criteria is met.
"""
parts = []
stats = []
partition_keys = partition_info["partition_keys"]
partition_obj = partition_info["partitions"]
# Check if `metadata` is just a list of paths
# (not splitting by row-group or collecting statistics)
if isinstance(metadata, list) and isinstance(metadata[0], str):
for full_path in metadata:
part = {
"piece": (full_path, None, partition_keys.get(full_path, None)),
"kwargs": {"partitions": partition_obj, "categories": categories},
}
parts.append(part)
return parts, stats
# Determine which columns need statistics
flat_filters = (
set(flatten(tuple(flatten(filters, container=list)), container=tuple))
if filters
else []
)
stat_col_indices = {}
for i, name in enumerate(schema.names):
if name in index_cols or name in flat_filters:
stat_col_indices[name] = i
stat_cols = list(stat_col_indices.keys())
gather_statistics = gather_statistics and len(stat_cols) > 0
# Convert metadata into simple dictionary structures
(
file_row_groups,
file_row_group_stats,
file_row_group_column_stats,
gather_statistics,
) = _process_metadata(
metadata,
int(split_row_groups) == 1,
gather_statistics,
stat_col_indices,
flat_filters == [],
)
if split_row_groups:
# Create parts from each file,
# limiting the number of row_groups in each piece
split_row_groups = int(split_row_groups)
for filename, row_groups in file_row_groups.items():
row_group_count = len(row_groups)
for i in range(0, row_group_count, split_row_groups):
i_end = i + split_row_groups
rg_list = row_groups[i:i_end]
full_path = (
fs.sep.join([data_path, filename])
if filename != ""
else data_path # This is a single file
)
pkeys = partition_keys.get(full_path, None)
if partition_obj and pkeys is None:
continue # This partition was filtered
part = {
"piece": (full_path, rg_list, pkeys),
"kwargs": {
"partitions": partition_obj,
"categories": categories,
"filters": filters,
"schema": schema,
},
}
parts.append(part)
if gather_statistics:
stat = _aggregate_stats(
filename,
file_row_group_stats[filename][i:i_end],
file_row_group_column_stats[filename][i:i_end],
stat_col_indices,
)
stats.append(stat)
else:
for filename, row_groups in file_row_groups.items():
full_path = (
fs.sep.join([data_path, filename])
if filename != ""
else data_path # This is a single file
)
pkeys = partition_keys.get(full_path, None)
if partition_obj and pkeys is None:
continue # This partition was filtered
rgs = None
part = {
"piece": (full_path, rgs, pkeys),
"kwargs": {
"partitions": partition_obj,
"categories": categories,
"filters": filters,
"schema": schema,
},
}
parts.append(part)
if gather_statistics:
stat = _aggregate_stats(
filename,
file_row_group_stats[filename],
file_row_group_column_stats[filename],
stat_col_indices,
)
stats.append(stat)
return parts, stats
class ArrowEngine(Engine):
@classmethod
def read_metadata(
cls,
fs,
paths,
categories=None,
index=None,
gather_statistics=None,
filters=None,
split_row_groups=None,
**kwargs,
):
# Check if we are using pyarrow.dataset API
dataset_kwargs = kwargs.get("dataset", {})
# Gather necessary metadata information. This includes
# the schema and (parquet) partitioning information.
# This may also set split_row_groups and gather_statistics,
# depending on _metadata availability.
(
schema,
metadata,
base_path,
partition_info,
split_row_groups,
gather_statistics,
) = _gather_metadata(
paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs
)
# Process metadata to define `meta` and `index_cols`
meta, index_cols, categories, index = _generate_dd_meta(
schema, index, categories, partition_info
)
# Cannot gather_statistics if our `metadata` is a list
# of paths, or if we are building a multiindex (for now).
# We also don't "need" to gather statistics if we don't
# want to apply any filters or calculate divisions
if (isinstance(metadata, list) and isinstance(metadata[0], str)) or len(
index_cols
) > 1:
gather_statistics = False
elif filters is None and len(index_cols) == 0:
gather_statistics = False
# Make sure gather_statistics allows filtering
# (if filters are desired)
if filters:
# Filters may require us to gather statistics
if gather_statistics is False and partition_info["partition_names"]:
warnings.warn(
"Filtering with gather_statistics=False. "
"Only partition columns will be filtered correctly."
)
elif gather_statistics is False:
raise ValueError("Cannot apply filters with gather_statistics=False")
elif not gather_statistics:
gather_statistics = True
# Finally, construct our list of `parts`
        # (and a corresponding list of statistics)
parts, stats = _construct_parts(
fs,
metadata,
schema,
filters,
index_cols,
base_path,
partition_info,
categories,
split_row_groups,
gather_statistics,
)
return (meta, stats, parts, index)
@classmethod
def read_partition(
cls,
fs,
piece,
columns,
index,
categories=(),
partitions=(),
filters=None,
schema=None,
**kwargs,
):
if isinstance(index, list):
for level in index:
# unclear if we can use set ops here. I think the order matters.
# Need the membership test to avoid duplicating index when
# we slice with `columns` later on.
if level not in columns:
columns.append(level)
# Ensure `columns` and `partitions` do not overlap
columns_and_parts = columns.copy()
if columns_and_parts and partitions:
for part_name in partitions.partition_names:
if part_name in columns:
columns.remove(part_name)
else:
columns_and_parts.append(part_name)
columns = columns or None
if isinstance(piece, str):
# `piece` is a file-path string
path = piece
row_group = None
partition_keys = None
else:
# `piece` contains (path, row_group, partition_keys)
(path, row_group, partition_keys) = piece
if not isinstance(row_group, list):
row_group = [row_group]
dfs = []
for rg in row_group:
piece = pq.ParquetDatasetPiece(
path,
row_group=rg,
partition_keys=partition_keys,
open_file_func=partial(fs.open, mode="rb"),
)
arrow_table = cls._parquet_piece_as_arrow(
piece, columns, partitions, **kwargs
)
df = cls._arrow_table_to_pandas(arrow_table, categories, **kwargs)
if len(row_group) > 1:
dfs.append(df)
if len(row_group) > 1:
df = pd.concat(dfs)
# Note that `to_pandas(ignore_metadata=False)` means
# pyarrow will use the pandas metadata to set the index.
index_in_columns_and_parts = set(df.index.names).issubset(
set(columns_and_parts)
)
if not index:
if index_in_columns_and_parts:
# User does not want to set index and a desired
# column/partition has been set to the index
df.reset_index(drop=False, inplace=True)
else:
# User does not want to set index and an
# "unwanted" column has been set to the index
df.reset_index(drop=True, inplace=True)
else:
if set(df.index.names) != set(index) and index_in_columns_and_parts:
# The wrong index has been set and it contains
# one or more desired columns/partitions
df.reset_index(drop=False, inplace=True)
elif index_in_columns_and_parts:
# The correct index has already been set
index = False
columns_and_parts = list(
set(columns_and_parts).difference(set(df.index.names))
)
df = df[list(columns_and_parts)]
if index:
df = df.set_index(index)
return df
@classmethod
def _arrow_table_to_pandas(
cls, arrow_table: pa.Table, categories, **kwargs
) -> pd.DataFrame:
_kwargs = kwargs.get("arrow_to_pandas", {})
_kwargs.update({"use_threads": False, "ignore_metadata": False})
return arrow_table.to_pandas(categories=categories, **_kwargs)
@classmethod
def _parquet_piece_as_arrow(
cls, piece: pq.ParquetDatasetPiece, columns, partitions, **kwargs
) -> pa.Table:
arrow_table = piece.read(
columns=columns,
partitions=partitions,
use_pandas_metadata=True,
use_threads=False,
**kwargs.get("read", {}),
)
return arrow_table
@staticmethod
def initialize_write(
df,
fs,
path,
append=False,
partition_on=None,
ignore_divisions=False,
division_info=None,
schema=None,
index_cols=None,
**kwargs,
):
# Infer schema if "infer"
# (also start with inferred schema if user passes a dict)
if schema == "infer" or isinstance(schema, dict):
# Start with schema from _meta_nonempty
_schema = pa.Schema.from_pandas(
df._meta_nonempty.set_index(index_cols)
if index_cols
else df._meta_nonempty
)
# Use dict to update our inferred schema
if isinstance(schema, dict):
schema = pa.schema(schema)
for name in schema.names:
i = _schema.get_field_index(name)
j = schema.get_field_index(name)
_schema = _schema.set(i, schema.field(j))
# If we have object columns, we need to sample partitions
# until we find non-null data for each column in `sample`
sample = [col for col in df.columns if df[col].dtype == "object"]
if schema_field_supported and sample and schema == "infer":
delayed_schema_from_pandas = delayed(pa.Schema.from_pandas)
for i in range(df.npartitions):
# Keep data on worker
_s = delayed_schema_from_pandas(
df[sample].to_delayed()[i]
).compute()
for name, typ in zip(_s.names, _s.types):
if typ != "null":
i = _schema.get_field_index(name)
j = _s.get_field_index(name)
_schema = _schema.set(i, _s.field(j))
sample.remove(name)
if not sample:
break
# Final (inferred) schema
schema = _schema
dataset = fmd = None
i_offset = 0
if append and division_info is None:
ignore_divisions = True
fs.mkdirs(path, exist_ok=True)
if append:
try:
# Allow append if the dataset exists.
# Also need dataset.metadata object if
# ignore_divisions is False (to check divisions)
dataset = pq.ParquetDataset(path, filesystem=fs)
if not dataset.metadata and not ignore_divisions:
# TODO: Be more flexible about existing metadata.
raise NotImplementedError(
"_metadata file needed to `append` "
"with `engine='pyarrow'` "
"unless `ignore_divisions` is `True`"
)
fmd = dataset.metadata
except (IOError, ValueError, IndexError):
# Original dataset does not exist - cannot append
append = False
if append:
names = dataset.metadata.schema.names
has_pandas_metadata = (
dataset.schema.to_arrow_schema().metadata is not None
and b"pandas" in dataset.schema.to_arrow_schema().metadata
)
if has_pandas_metadata:
pandas_metadata = json.loads(
dataset.schema.to_arrow_schema().metadata[b"pandas"].decode("utf8")
)
categories = [
c["name"]
for c in pandas_metadata["columns"]
if c["pandas_type"] == "categorical"
]
else:
categories = None
dtypes = _get_pyarrow_dtypes(dataset.schema.to_arrow_schema(), categories)
if set(names) != set(df.columns) - set(partition_on):
raise ValueError(
"Appended columns not the same.\n"
"Previous: {} | New: {}".format(names, list(df.columns))
)
elif ( | pd.Series(dtypes) | pandas.Series |
import argparse
import glob
import os
import pandas as pd
from tabulate import tabulate
from texttable import Texttable
from dante_tokenizer.data.load import read_test_data
from dante_tokenizer.data.preprocessing import reconstruct_html_chars, remove_quotes
from dante_tokenizer.evaluate import evaluate_dataset
from dante_tokenizer.tokenizer import (
predict_dante_tokenizer,
predict_nltk_twitter_tokenizer,
predict_nltk_word_tokenizer,
predict_spacy,
predict_twikenizer,
)
def main():
parser = argparse.ArgumentParser(
"Evaluate different tokenizers on Dante Dataset (Brazilian Stock-Market Tweets)"
)
parser.add_argument("csv_path", type=str, help="Path to the Dante dataset csv file")
parser.add_argument(
"conllu_path",
type=str,
help="Path to the conllu file containing tokenized sentences",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="Print detailed metrics and wrong sentence tokens",
)
parser.add_argument(
"--output_table",
default=False,
action="store_true",
help="Wheter to output latex code to put in your awesome paper or not.",
)
parser.add_argument(
"--save_results",
default=False,
action="store_true",
help="Save results on csv file",
)
args = parser.parse_args()
if os.path.isdir(args.conllu_path):
ids, sentences, true_tokens = [], [], []
for file_name in glob.glob(f"{args.conllu_path}/*.conllu"):
_ids, _sentences, _true_tokens = read_test_data(args.csv_path, file_name)
ids += _ids
sentences += _sentences
true_tokens += _true_tokens
else:
ids, sentences, true_tokens = read_test_data(args.csv_path, args.conllu_path)
# Preprocess input
sentences = list(map(remove_quotes, sentences))
sentences = list(map(reconstruct_html_chars, sentences))
tokenizers = [
("nltk Word Tokenizer", predict_nltk_word_tokenizer),
("nltk Twitter Tokenizer", predict_nltk_twitter_tokenizer),
("Twikenizer", predict_twikenizer),
("Spacy", predict_spacy),
("DANTE Tokenizer", predict_dante_tokenizer),
]
table = [["Tokenizer", "Precision", "Recall", "Micro F-score"]]
for name, tokenizer in tokenizers:
pred_tokens = tokenizer(sentences)
if not pred_tokens:
continue
precision, recall, f_score, extra_metrics = evaluate_dataset(
pred_tokens, true_tokens, complete_metrics=True
)
table.append(
[
name,
"{:.4f} ยฑ {:.4f}".format(precision[0], precision[1]),
"{:.4f} ยฑ {:.4f}".format(recall[0], recall[1]),
"{:.4f} ยฑ {:.4f}".format(f_score[0], f_score[1]),
]
)
if args.debug:
print(
(
f"{name} precision: {precision}, recall: {recall}, "
+ f"f_score: {f_score} "
+ f"true_positives: {extra_metrics['true_positives']} "
+ f"false_positives: {extra_metrics['false_positives']} "
+ f"false_negatives: {extra_metrics['false_negatives']} "
)
)
if args.debug:
for incorrect_sentences_id in extra_metrics["incorrect_sentences_ids"]:
print("*" * 20)
print(pred_tokens[incorrect_sentences_id])
print(true_tokens[incorrect_sentences_id])
print()
latex_table = Texttable()
latex_table.set_cols_align(["c"] * 4)
latex_table.set_deco(Texttable.HEADER | Texttable.VLINES)
latex_table.add_rows(table)
print(latex_table.draw())
if args.output_table:
print(tabulate(table, headers="firstrow", tablefmt="latex"))
if args.save_results:
results = | pd.DataFrame(table[1:], columns=table[0]) | pandas.DataFrame |
'''
#Step 3: Process all tweets and assign them โlabelโ
'''
import os
import pandas as pd
import re
import numpy as np
from poultryrate.data_model import data_model
class tweet_classifier():
tweet_summary = pd.DataFrame()
tweet_exploded = pd.DataFrame()
islamicmonths = | pd.DataFrame() | pandas.DataFrame |
import copy
import gc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.patches import ConnectionPatch
from protocol_analysis import visualization_protocols as vis
def pie_plot_percentage(party_dict: dict, title, save_name, name_dict, fig_dpi):
plt.figure(figsize=(16, 10))
plt.rcParams['font.size'] = 12
palette = plt.get_cmap('Set1')
labels = []
values = []
colors = []
index = 0
for party in party_dict:
current_value = party_dict[party].__len__()
labels.append(name_dict[party] + " - " + str(current_value))
values.append(current_value)
colors.append(palette(index))
if index == 4:
index += 3
index += 1
total_values = np.array(values).sum()
index_v = 0
for v in values:
percent = round(v / total_values * 100, 2)
labels[index_v] += " (" + str('%1.2f' % percent) + "%)"
index_v += 1
plt.pie(values, colors=colors, labels=values, autopct='%1.2f%%', counterclock=False)
t = plt.title(title)
t.set_ha("left")
plt.gca().axis("equal")
plt.legend(labels, loc="right", bbox_to_anchor=(0.9, 0.5), bbox_transform=plt.gcf().transFigure)
plt.subplots_adjust(left=0.2, bottom=0.1, right=0.7)
plt.savefig(save_name, dpi=fig_dpi, bbox_inches="tight")
print(" Plot saved to \"" + save_name + "\"")
plt.close()
def plot_dst(party_hosts_traffic: dict, title, save_name, party_bar_plot: list,
name_dict, third_party_color: list, host_name_too_long, fig_dpi,
empty_parties, patch_dict):
party_hosts_traffic = copy.deepcopy(party_hosts_traffic)
if empty_parties[0]:
plt.rcParams['font.size'] = 16
current = plt.figure(figsize=(20, 12))
sub1 = current.add_subplot(1, 2, 1)
sub3 = current.add_subplot(1, 2, 2)
sub2 = None
# move the white spaces between the bar and pie plots
current.subplots_adjust(wspace=-0.3)
elif empty_parties[1]:
plt.rcParams['font.size'] = 16
current = plt.figure(figsize=(20, 12))
sub1 = current.add_subplot(1, 2, 1)
sub2 = current.add_subplot(1, 2, 2)
sub3 = None
# move the white spaces between the bar and pie plots
current.subplots_adjust(wspace=-0.3)
else:
plt.rcParams['font.size'] = 12.5
current = plt.figure(figsize=(20, 10))
sub1 = current.add_subplot(1, 3, 2)
sub3 = current.add_subplot(1, 3, 3)
sub2 = current.add_subplot(1, 3, 1)
# move the white spaces between the bar and pie plots
current.subplots_adjust(wspace=-0.45)
# pie chart textprops={'fontsize': 18}
palette = plt.get_cmap('Set1')
labels = []
values = []
colors = []
num_labels = []
col_index = 0
for party in party_hosts_traffic:
labels.append(name_dict[party])
total_traffic = 0
all_hosts: dict = party_hosts_traffic[party]
all_hosts_len = all_hosts.__len__()
all_t = int(np.array(list(all_hosts.values())).sum())
other_h_t = 0
too_small_h = []
for host in all_hosts:
current_t = all_hosts[host]
total_traffic += current_t
if all_hosts_len > 20 and \
((party != "2.5" and current_t / all_t <= 0.002)
or (party == "2.5" and current_t / all_t <= 0.01)):
other_h_t += current_t
too_small_h.append(host)
if other_h_t > 0:
all_hosts[host_name_too_long[party]] = other_h_t
for h in too_small_h:
del all_hosts[h]
values.append(total_traffic)
colors.append(palette(col_index))
num_label = vis.network_traffic_units(total_traffic)
num_labels.append(num_label)
if col_index == 4:
col_index += 3
col_index += 1
values = np.array(values)
labels = np.char.array(labels)
por_cent = 100. * values / values.sum()
patches, texts = sub1.pie(values, colors=colors, counterclock=False, radius=1)
labels = ['{0} - {1:1.2f}%'.format(i, j) for i, j in zip(labels, por_cent)]
l_index = 0
while l_index < labels.__len__():
labels[l_index] += (" (" + num_labels[l_index] + ")")
l_index += 1
# move the position of pie plot labels
sub1.legend(patches, labels, loc='center left', bbox_to_anchor=(0.25, -0.04))
if sub2 is not None and sub3 is not None:
sub1.set_title(title, x=0.5, y=1.05)
plot_bar_attached(sub1=sub1,
sub2=sub3,
third_party_color=third_party_color[1],
party_bar_plot=party_bar_plot[1],
party_hosts_traffic=party_hosts_traffic,
values=values,
legend_pos=(0.9, 1.),
patch_dict=patch_dict,
on_left=False)
plot_bar_attached(sub1=sub1,
sub2=sub2,
third_party_color=third_party_color[0],
party_bar_plot=party_bar_plot[0],
party_hosts_traffic=party_hosts_traffic,
values=values,
legend_pos=(0.04, 1.),
patch_dict=patch_dict,
on_left=True)
elif sub3 is not None:
sub1.set_title(title, x=0.8, y=1.1)
plot_bar_attached(sub1=sub1,
sub2=sub3,
third_party_color=third_party_color[1],
party_bar_plot=party_bar_plot[1],
party_hosts_traffic=party_hosts_traffic,
values=values,
legend_pos=(0.92, 1.),
patch_dict=patch_dict,
on_left=False)
elif sub2 is not None:
sub1.set_title(title, x=0.8, y=1.1)
plot_bar_attached(sub1=sub1,
sub2=sub2,
third_party_color=third_party_color[0],
party_bar_plot=party_bar_plot[0],
party_hosts_traffic=party_hosts_traffic,
values=values,
legend_pos=(0.92, 1.),
patch_dict=patch_dict,
on_left=False)
current.savefig(save_name, dpi=fig_dpi)
print(" Plot saved to \"" + save_name + "\"")
plt.close(current)
gc.collect()
def plot_bar_attached(sub1, sub2, third_party_color,
party_bar_plot, party_hosts_traffic,
values, legend_pos, patch_dict, on_left):
# bar for second party
x_pos = 0
bottom = 0
values_sub1: dict = {}
width = 0.2
colors_sub = []
sub_palette = plt.get_cmap(third_party_color)
this_party = patch_dict[party_bar_plot]
all_hosts: dict = party_hosts_traffic[this_party]
if all_hosts.__len__() >= 20:
sub_index = 40
else:
sub_index = 60
df = | pd.DataFrame.from_dict(all_hosts, orient='index') | pandas.DataFrame.from_dict |