'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from numpy import array
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from sos_trades_core.sos_wrapping.analysis_discs.grid_search_eval import GridSearchEval
from sos_trades_core.execution_engine.sos_coupling import SoSCoupling
class TestGridSearchEval(unittest.TestCase):
"""
SoSGridSearchEval test class
"""
def setUp(self):
'''
Initialize data needed for testing
'''
pd.set_option('display.max_columns', 10)
self.namespace = 'MyCase'
self.study_name = f'{self.namespace}'
self.repo = 'sos_trades_core.sos_processes.test'
self.base_path = 'sos_trades_core.sos_wrapping.test_discs'
self.exec_eng = ExecutionEngine(self.namespace)
self.factory = self.exec_eng.factory
self.grid_search = 'GridSearch'
self.proc_name = 'test_grid_search'
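# Each test below builds the 'test_grid_search' process declared above,
# configures it through the ExecutionEngine, then drives the GridSearch
# discipline via the data manager (dm) using fully qualified variable names
# of the form '<study_name>.GridSearch.<variable>'.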
def test_01_grid_search_eval(self):
sa_builder = self.exec_eng.factory.get_builder_from_process(
self.repo, self.proc_name)
self.exec_eng.factory.set_builders_to_coupling_builder(
sa_builder)
self.exec_eng.configure()
self.exec_eng.display_treeview_nodes()
print('Study first configure!')
self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_inputs')
# self.exec_eng.dm.get_data('MyCase.GridSearch.eval_inputs')[
# 'possible_values']
# dict_values = {}
# self.exec_eng.load_study_from_input_dict(dict_values)
eval_inputs = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_inputs')
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.x', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.j', ['selected_input']] = True
eval_outputs = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_outputs')
eval_outputs.loc[eval_outputs['full_name'] ==
f'{self.grid_search}.Disc1.y', ['selected_output']] = True
dict_values = {
# GRID SEARCH INPUTS
f'{self.study_name}.{self.grid_search}.eval_inputs': eval_inputs,
f'{self.study_name}.{self.grid_search}.eval_outputs': eval_outputs,
# DISC1 INPUTS
f'{self.study_name}.{self.grid_search}.Disc1.name': 'A1',
f'{self.study_name}.{self.grid_search}.Disc1.a': 20,
f'{self.study_name}.{self.grid_search}.Disc1.b': 2,
f'{self.study_name}.{self.grid_search}.Disc1.x': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.d': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.f': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.g': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.h': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.j': 3.,
}
self.exec_eng.load_study_from_input_dict(dict_values)
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
print(f'Second configure with design_space creation: \n {ds}')
self.exec_eng.execute()
grid_search_disc = self.exec_eng.dm.get_disciplines_with_name(
f'{self.study_name}.{self.grid_search}')[0]
grid_search_disc_output = grid_search_disc.get_sosdisc_outputs()
doe_disc_samples = grid_search_disc_output['samples_inputs_df']
y_dict = grid_search_disc_output['GridSearch.Disc1.y_dict']
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
print(f'Study executed from the design_space: \n {ds}')
print(f'Study executed with the samples: \n {doe_disc_samples}')
print(f'Study generated the output: y_dict \n {y_dict}')
dspace = pd.DataFrame({
'shortest_name': ['x', 'j'],
'lower_bnd': [5., 20.],
'upper_bnd': [7., 25.],
'nb_points': [3, 3],
'full_name': ['GridSearch.Disc1.x', 'GridSearch.Disc1.j'],
})
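# The overwritten design_space asks for 3 points per selected input: x swept
# over [5, 7] and j over [20, 25], each row carrying both the shortest and the
# fully qualified variable name.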
dict_values = {
f'{self.study_name}.{self.grid_search}.design_space': dspace,
}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.configure()
self.exec_eng.execute()
grid_search_disc_output = grid_search_disc.get_sosdisc_outputs()
doe_disc_samples = grid_search_disc_output['samples_inputs_df']
y_dict = grid_search_disc_output['GridSearch.Disc1.y_dict']
# CHECK THE GRIDSEARCH OUTPUTS
doe_disc_samples_ref = pd.DataFrame({'scenario': [
'scenario_1', 'scenario_2', 'scenario_3'], 'GridSearch.Disc1.x': [5.0, 6.0, 7.0]})
y_dict_ref = {'scenario_1': 102.0,
'scenario_2': 122.0, 'scenario_3': 142.0}
# assert_frame_equal(doe_disc_samples, doe_disc_samples_ref)
# assert y_dict_ref == y_dict
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
print(f'Study executed from the design_space: \n {ds}')
print(f'Study executed with the samples: \n {doe_disc_samples}')
print(f'Study generated the output: y_dict \n {y_dict}')
# TEST FOR 6 INPUTS
eval_inputs = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_inputs')
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.x', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.f', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.g', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.h', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.j', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.d', ['selected_input']] = True
dict_values = {
f'{self.study_name}.{self.grid_search}.eval_inputs': eval_inputs,
f'{self.study_name}.{self.grid_search}.eval_outputs': eval_outputs,
}
self.exec_eng.load_study_from_input_dict(dict_values)
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
print(f'Second configure with design_space creation: \n {ds}')
self.exec_eng.execute()
grid_search_disc = self.exec_eng.dm.get_disciplines_with_name(
f'{self.study_name}.{self.grid_search}')[0]
grid_search_disc_output = grid_search_disc.get_sosdisc_outputs()
doe_disc_samples = grid_search_disc_output['samples_inputs_df']
y_dict = grid_search_disc_output['GridSearch.Disc1.y_dict']
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
print(f'Study executed from the design_space: \n {ds}')
print(f'Study executed with the samples: \n {doe_disc_samples}')
print(f'Study generated the output: y_dict \n {y_dict}')
# CHANGE THE SELECTED INPUTS TO 2
eval_inputs = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_inputs')
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.x', ['selected_input']] = False
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.f', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.g', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.h', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.j', ['selected_input']] = False
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.d', ['selected_input']] = False
dict_values = {
f'{self.study_name}.{self.grid_search}.eval_inputs': eval_inputs,
f'{self.study_name}.{self.grid_search}.eval_outputs': eval_outputs,
}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.dm.get_value('MyCase.GridSearch.eval_inputs')
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
print(f'Second configure with design_space creation: \n {ds}')
self.exec_eng.execute()
self.exec_eng.dm.get_value('MyCase.GridSearch.eval_inputs')
def test_02_grid_search_shortest_name(self):
sa_builder = self.exec_eng.factory.get_builder_from_process(
self.repo, self.proc_name)
self.exec_eng.factory.set_builders_to_coupling_builder(
sa_builder)
self.exec_eng.configure()
self.exec_eng.display_treeview_nodes()
print('Study first configure!')
grid_search_disc = self.exec_eng.dm.get_disciplines_with_name(
f'{self.study_name}.{self.grid_search}')[0]
full_names = ['GridSearch.Disc1.d', 'GridSearch.Disc1.f', 'GridSearch.Disc1.g',
'GridSearch.Disc1.h', 'GridSearch.Disc1.j', 'GridSearch.Disc1.x',
'GridSearch.Disc2.d', 'GridSearch.Nana.Disc1.d', 'GridSearch.Nana.Disc2.d']
shortest_list = grid_search_disc.generate_shortest_name(full_names)
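# No assertion follows: the call above only checks that generate_shortest_name
# runs on names sharing common prefixes (e.g. the several '...d' variables),
# which presumably forces it to keep enough of each path to stay unique.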
def test_03_postproc_check(self):
sa_builder = self.exec_eng.factory.get_builder_from_process(
self.repo, self.proc_name)
self.exec_eng.factory.set_builders_to_coupling_builder(
sa_builder)
self.exec_eng.configure()
self.exec_eng.display_treeview_nodes()
# print('Study first configure!')
self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_inputs')
# self.exec_eng.dm.get_data('MyCase.GridSearch.eval_inputs')[
# 'possible_values']
# dict_values = {}
# self.exec_eng.load_study_from_input_dict(dict_values)
eval_inputs = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_inputs')
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.f', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.g', ['selected_input']] = True
eval_inputs.loc[eval_inputs['full_name'] ==
f'{self.grid_search}.Disc1.h', ['selected_input']] = True
eval_outputs = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.eval_outputs')
eval_outputs.loc[eval_outputs['full_name'] ==
f'{self.grid_search}.Disc1.y', ['selected_output']] = True
dict_values = {
# GRID SEARCH INPUTS
f'{self.study_name}.{self.grid_search}.eval_inputs': eval_inputs,
f'{self.study_name}.{self.grid_search}.eval_outputs': eval_outputs,
# DISC1 INPUTS
f'{self.study_name}.{self.grid_search}.Disc1.name': 'A1',
f'{self.study_name}.{self.grid_search}.Disc1.a': 20,
f'{self.study_name}.{self.grid_search}.Disc1.b': 2,
f'{self.study_name}.{self.grid_search}.Disc1.x': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.d': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.f': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.g': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.h': 3.,
f'{self.study_name}.{self.grid_search}.Disc1.j': 3.,
}
self.exec_eng.load_study_from_input_dict(dict_values)
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
print(f'Second configure with design_space creation: \n {ds}')
self.exec_eng.execute()
grid_search_disc = self.exec_eng.dm.get_disciplines_with_name(
f'{self.study_name}.{self.grid_search}')[0]
grid_search_disc_output = grid_search_disc.get_sosdisc_outputs()
doe_disc_samples = grid_search_disc_output['samples_inputs_df']
y_dict = grid_search_disc_output['GridSearch.Disc1.y_dict']
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
# yy_dict={'scenario_1':12.0,
# 'scenario_2':25.0,
# 'scenario_3':56.0,
# 'scenario_4':48.0,
# 'scenario_5':19.0,
# 'scenario_6':55.0,
# 'scenario_7':27.0,
# 'scenario_8':32.0,
# 'reference':45.0,}
# new_gsoutputs_dict={'doe_samples_dataframe':doe_disc_samples,
# 'GridSearch.Disc1.y_dict':yy_dict}
# grid_search_disc.store_sos_outputs_values(
# new_gsoutputs_dict, update_dm=True
# )
# dspace = pd.DataFrame({
# 'shortest_name': ['f','g', 'h'],
# 'lower_bnd': [5., 20.,1.],
# 'upper_bnd': [7., 25.,2.],
# 'nb_points': [3, 3, 3],
# 'full_name': ['GridSearch.Disc1.f', 'GridSearch.Disc1.g','GridSearch.Disc1.h'],
# })
# dict_values = {
# f'{self.study_name}.{self.grid_search}.design_space': dspace,
# }
# self.exec_eng.load_study_from_input_dict(dict_values)
# self.exec_eng.configure()
# self.exec_eng.execute()
ds = self.exec_eng.dm.get_value(
f'{self.study_name}.{self.grid_search}.design_space')
chart_filters = grid_search_disc.get_chart_filter_list()
graph_list = grid_search_disc.get_post_processing_list(chart_filters)
# for graph in graph_list:
# # pass
# graph.to_plotly().show()
def test_04_grid_search_status(self):
""" This tests aims at proving the ability of grid search
discipline to have the proper status after it has run
"""
self.ns = f'{self.study_name}'
exec_eng = ExecutionEngine(self.study_name)
factory = exec_eng.factory
proc_name = "test_sellar_grid_search"
grid_search_builder = factory.get_builder_from_process(repo=self.repo,
mod_id=proc_name)
exec_eng.factory.set_builders_to_coupling_builder(
grid_search_builder)
exec_eng.configure()
exp_tv_list = [f'Nodes representation for Treeview {self.ns}',
'|_ MyCase',
f'\t|_ GridSearch',
'\t\t|_ Sellar_2',
'\t\t|_ Sellar_1',
'\t\t|_ Sellar_Problem']
exp_tv_str = '\n'.join(exp_tv_list)
exec_eng.display_treeview_nodes(True)
assert exp_tv_str == exec_eng.display_treeview_nodes()
# -- set up disciplines
values_dict = {}
values_dict[f'{self.ns}.x'] = 1.
values_dict[f'{self.ns}.y_1'] = 1.
values_dict[f'{self.ns}.y_2'] = 1.
values_dict[f'{self.ns}.z'] = array([1., 1.])
values_dict[f'{self.ns}.GridSearch.Sellar_Problem.local_dv'] = 10
exec_eng.load_study_from_input_dict(values_dict)
dspace = pd.DataFrame({'variable': ['x'],
'lower_bnd': [20.],
'upper_bnd': [25.],
'nb_points': [3],
})
dspace = pd.DataFrame(dspace)
input_selection_x = {'selected_input': [False, True, False, False, False],
'full_name': ['GridSearch.Sellar_Problem.local_dv', 'x', 'y_1',
'y_2',
'z'],
'shortest_name': ['local_dv', 'x', 'y_1',
'y_2',
'z']}
input_selection_x = pd.DataFrame(input_selection_x)
import argparse
import json
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="Parse the tabular data from Mturk and save to csv.")
parser.add_argument("json", type=str, help="path to json file")
parser.add_argument("--save_path", type=str, default="./")
args = parser.parse_args()
results = {}
with open(args.json, "r") as f:
for line in f:
if len(line) < 3:
break
elif line[-1] != "\n": # for final line
line += "\n"
answers = eval(line[:-1])["taskAnswers"]
answers = eval(answers.replace("true", "True").replace("false", "False"))[0]
res = []
for key in answers.keys():
reasonable = key[0] == "r"
if reasonable:
idx = int(key.split("_")[1][3:])
k = int(key.split("_")[2][1])
is_reasonable = answers[key]["reasonable"]
if (idx, k) in results:
results[(idx, k)].append(is_reasonable)
else:
results[(idx, k)] = [is_reasonable]
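# At this point `results` maps (idx, k) -> list of per-worker True/False
# judgments. Given the slicing above, the raw answer keys are assumed to look
# like 'reasonable_idxNN_kM' (illustrative shape, not taken from the data).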
save_name = args.save_path + args.json.split("/")[-2] + ".tsv"
df = pd.DataFrame({"results": results})
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 17:28:54 2018
@author: galengao
This is the original analysis code as it exists in the environment where it was writen and initially run.
Portions and modifications of this script constitute all other .py scripts in this directory.
"""
import numpy as np
import pandas as pd
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
### Helper Function to Load in the Data ###
def load_data(coh, thresh=False):
"""Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs
for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)
one level up from this script."""
if thresh:
hg38 = '../hg38_gistic/'+coh+'/all_thresholded.by_genes.txt'
hg19 = '../hg19_gistic/'+coh+'/all_thresholded.by_genes.txt'
hg38drops = ['Cytoband', 'Locus ID']
else:
hg38 = '../hg38_gistic/'+coh+'/all_data_by_genes.txt'
hg19 = '../hg19_gistic/'+coh+'/all_data_by_genes.txt'
hg38drops = ['Cytoband', 'Gene ID']
df_hg19 = pd.read_table(hg19, index_col=[0]).drop(['Cytoband', 'Locus ID'], axis=1)
df_hg38 = pd.read_table(hg38, index_col=[0]).drop(hg38drops, axis=1)
same_samps = list(set(df_hg38.columns) & set(df_hg19.columns))
same_genes = list(set(df_hg38.index) & set(df_hg19.index))
print(coh, len(same_genes), len(same_samps))
return df_hg38[same_samps].T[same_genes], df_hg19[same_samps].T[same_genes]
return df_hg38, df_hg19
### Raw Copy Number Values Analysis Code ###
def raw_value_comparison(coh, plot=False):
"""Return the average differences in raw copy number values between the
gene-level calls in hg19 and hg38 for each gene for a given tumor type
'coh.' If plot=True, plot the genes' differences in a histogram."""
# load in the data
df_38, df_19 = load_data(coh, thresh=False)
# compute average sample-by-sample differences for each gene
df_s = df_38 - df_19
avg_diff = {g:np.average(df_s[g]) for g in df_s.columns.get_level_values('Gene Symbol')}
# take note of which genes are altered more than our threshold of 4*std
results = []
std = np.std([avg_diff[x] for x in avg_diff])
for g in avg_diff:
if avg_diff[g] > 4 * std:
results.append([coh, 'Pos', g, avg_diff[g]])
elif avg_diff[g] < -4 * std:
results.append([coh, 'Neg', g, avg_diff[g]])
if plot:
plt.hist([avg_diff[x] for x in avg_diff], bins=1000)
plt.title(coh, fontsize=16)
plt.xlabel('Average CN Difference Between Alignments', fontsize=14)
plt.ylabel('Genes', fontsize=14)
sns.despine()
plt.savefig('./genehists/'+coh+'_genehist.pdf')
plt.savefig('./genehists/'+coh+'_genehist.png')
plt.clf()
return results
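# Example call (cohort name is illustrative, not taken from this repository;
# any tumor type with GISTIC output under ../hg38_gistic and ../hg19_gistic
# would work):
#   flagged = raw_value_comparison('BRCA', plot=False)
#   # each entry of `flagged` is [cohort, 'Pos' or 'Neg', gene, avg difference]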
def sequential_cohort_test_raw_values(cohs, plot=False):
"""Sequentially compare raw gene-level calls for the given tumor types."""
c_results = []
for coh in cohs: # perform raw value comparison for each cohort
c_results += raw_value_comparison(coh, plot=plot)
# compile results together
df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference'])
gcount = Counter(df_r['Gene'])
pos_gcount = Counter(df_r[df_r['Direction']=='Pos']['Gene'])
neg_gcount = Counter(df_r[df_r['Direction']=='Neg']['Gene'])
df = pd.DataFrame([gcount[x] for x in gcount], index=gcount.keys(), columns=['Count'])
df['Count_pos'] = [pos_gcount[x] if x in pos_gcount else 0 for x in gcount]
df['Count_neg'] = [neg_gcount[x] if x in neg_gcount else 0 for x in gcount]
if plot: # write output
plt.plot(np.sort([gcount[x] for x in gcount])[::-1], 'b-')
plt.xlabel('Gene by Rank', fontsize=16)
plt.ylabel('Number of Occurences', fontsize=16)
sns.despine()
plt.savefig('GeneDevianceDropoff.pdf')
plt.savefig('GeneDevianceDropoff.png')
df_r.to_csv('./genehists/LargestDifferences.tsv', sep='\t', index=False)
df.to_csv('./genehists/LargestDifferenceGenes_ByCount.tsv', sep='\t', index=True)
### Thresholded Copy Number Values Analysis Code ###
def thresholded_value_comparison(df_hg38, df_hg19, metric='hamming'):
"""Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either
hamming (number of discrepancies in each gene) or manhattan (sum of
'distances' between each gene so a 1 to -1 change is 2). Returns a vector
of each gene's metric."""
out = []
for i, g in enumerate(df_hg38.columns):
if metric == 'hamming':
out.append(sum(df_hg19[g] != df_hg38[g])/len(df_hg19))
elif metric == 'manhattan':
out.append(sum(abs((df_hg19[g] - df_hg38[g]))))
return pd.DataFrame(out, index=df_hg38.columns)
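# Minimal usage sketch with hypothetical toy frames (not from the paper's
# data): a gene whose thresholded call flips from 1 in hg19 to -1 in hg38
# contributes 1/len(df) to its hamming score but 2 to its manhattan score.
#   toy_hg19 = pd.DataFrame({'GENE_A': [1, 0], 'GENE_B': [0, 0]})
#   toy_hg38 = pd.DataFrame({'GENE_A': [-1, 0], 'GENE_B': [0, 0]})
#   thresholded_value_comparison(toy_hg38, toy_hg19, metric='hamming')    # GENE_A -> 0.5
#   thresholded_value_comparison(toy_hg38, toy_hg19, metric='manhattan')  # GENE_A -> 2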
def sequential_cohort_test_thresholded_values(cohs):
"""Compare thresholded gene-level calls for input tumor types."""
df_out = pd.DataFrame([])
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from sklearn.inspection import partial_dependence
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn import svm
from sklearn.datasets import load_boston
from articles.pd.support import load_rent, load_bulldozer, load_flights, \
toy_weather_data, toy_weight_data, \
df_cat_to_catcode, df_split_dates, \
df_string_to_cat, synthetic_interaction_data
from stratx import plot_stratpd, plot_catstratpd, \
plot_stratpd_gridsearch, plot_catstratpd_gridsearch
from stratx.partdep import partial_dependence
from stratx.plot import marginal_plot_, plot_ice, plot_catice
from stratx.ice import predict_ice, predict_catice, friedman_partial_dependence
import inspect
import matplotlib.patches as mpatches
from collections import OrderedDict
import matplotlib.pyplot as plt
import os
import shap
import xgboost as xgb
from colour import rgb2hex, Color
from dtreeviz.trees import tree, ShadowDecTree
figsize = (2.5, 2)
figsize2 = (3.8, 3.2)
GREY = '#444443'
# This genfigs.py code is just demonstration code to generate figures for the paper.
# There are lots of programming sins committed here; do not take this to be
# our idea of good code. ;)
# For data sources, please see notebooks/examples.ipynb
def addnoise(df, n=1, c=0.5, prefix=''):
if n == 1:
df[f'{prefix}noise'] = np.random.random(len(df)) * c
return
for i in range(n):
df[f'{prefix}noise{i + 1}'] = np.random.random(len(df)) * c
def fix_missing_num(df, colname):
df[colname + '_na'] = pd.isnull(df[colname])
df[colname].fillna(df[colname].median(), inplace=True)
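# Minimal usage sketch (hypothetical call, not used in this excerpt): adds a
# boolean '<colname>_na' flag column and fills the missing values in place
# with the column median.
#   fix_missing_num(df, 'MachineHours')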
def savefig(filename, pad=0):
plt.tight_layout(pad=pad, w_pad=0, h_pad=0)
plt.savefig(f"images/{filename}.pdf", bbox_inches="tight", pad_inches=0)
# plt.savefig(f"images/{filename}.png", dpi=150)
plt.tight_layout()
plt.show()
plt.close()
def rent():
print(f"----------- {inspect.stack()[0][3]} -----------")
np.random.seed(1) # pick seed for reproducible article images
X,y = load_rent(n=10_000)
df_rent = X.copy()
df_rent['price'] = y
colname = 'bedrooms'
colname = 'bathrooms'
TUNE_RF = False
TUNE_XGB = False
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
if TUNE_RF:
rf, bestparams = tune_RF(X, y) # does CV on entire data set to tune
# bedrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 125}
# validation R^2 0.7873724127323822
# bathrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 200}
# validation R^2 0.8066593395345907
else:
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1, max_features=.3,
oob_score=True, n_jobs=-1)
rf.fit(X_train, y_train) # Use training set for plotting
print("RF OOB R^2", rf.oob_score_)
rf_score = rf.score(X_test, y_test)
print("RF validation R^2", rf_score)
if TUNE_XGB:
tuned_parameters = {'n_estimators': [400, 450, 500, 600, 1000],
'learning_rate': [0.008, 0.01, 0.02, 0.05, 0.08, 0.1, 0.11],
'max_depth': [3, 4, 5, 6, 7, 8, 9]}
grid = GridSearchCV(
xgb.XGBRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=2
)
grid.fit(X, y) # does CV on entire data set to tune
print("XGB best:", grid.best_params_)
b = grid.best_estimator_
# bedrooms
# XGB best: {'max_depth': 7, 'n_estimators': 250}
# XGB validation R^2 0.7945797751555217
# bathrooms
# XGB best: {'learning_rate': 0.11, 'max_depth': 6, 'n_estimators': 1000}
# XGB train R^2 0.9834399795800324
# XGB validation R^2 0.8244958014380593
else:
b = xgb.XGBRegressor(n_estimators=1000,
max_depth=6,
learning_rate=.11,
verbose=2,
n_jobs=8)
b.fit(X_train, y_train)
xgb_score = b.score(X_test, y_test)
print("XGB validation R^2", xgb_score)
lm = LinearRegression()
lm.fit(X_train, y_train)
lm_score = lm.score(X_test, y_test)
print("OLS validation R^2", lm_score)
lm.fit(X, y)
model, r2_keras = rent_deep_learning_model(X_train, y_train, X_test, y_test)
fig, axes = plt.subplots(1, 6, figsize=(10, 1.8),
gridspec_kw = {'wspace':0.15})
for i in range(len(axes)):
axes[i].set_xlim(0-.3,4+.3)
axes[i].set_xticks([0,1,2,3,4])
axes[i].set_ylim(1800, 9000)
axes[i].set_yticks([2000,4000,6000,8000])
axes[1].get_yaxis().set_visible(False)
axes[2].get_yaxis().set_visible(False)
axes[3].get_yaxis().set_visible(False)
axes[4].get_yaxis().set_visible(False)
axes[0].set_title("(a) Marginal", fontsize=10)
axes[1].set_title("(b) RF", fontsize=10)
axes[1].text(2,8000, f"$R^2=${rf_score:.3f}", horizontalalignment='center', fontsize=9)
axes[2].set_title("(c) XGBoost", fontsize=10)
axes[2].text(2,8000, f"$R^2=${xgb_score:.3f}", horizontalalignment='center', fontsize=9)
axes[3].set_title("(d) OLS", fontsize=10)
axes[3].text(2,8000, f"$R^2=${lm_score:.3f}", horizontalalignment='center', fontsize=9)
axes[4].set_title("(e) Keras", fontsize=10)
axes[4].text(2,8000, f"$R^2=${r2_keras:.3f}", horizontalalignment='center', fontsize=9)
axes[5].set_title("(f) StratPD", fontsize=10)
avg_per_baths = df_rent.groupby(colname).mean()['price']
axes[0].scatter(df_rent[colname], df_rent['price'], alpha=0.07, s=5)
axes[0].scatter(np.unique(df_rent[colname]), avg_per_baths, s=6, c='black',
label="average price/{colname}")
axes[0].set_ylabel("price") # , fontsize=12)
axes[0].set_xlabel("bathrooms")
axes[0].spines['right'].set_visible(False)
axes[0].spines['top'].set_visible(False)
ice = predict_ice(rf, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[1], show_xlabel=True,
show_ylabel=False)
ice = predict_ice(b, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[2], show_ylabel=False)
ice = predict_ice(lm, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[3], show_ylabel=False)
scaler = StandardScaler()
X_train_ = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
# y_pred = model.predict(X_)
# print("Keras training R^2", r2_score(y, y_pred)) # y_test in y
ice = predict_ice(model, X_train_, colname, 'price', numx=30, nlines=100)
# replace normalized unique X with unnormalized
ice.iloc[0, :] = np.linspace(np.min(X_train[colname]), np.max(X_train[colname]), 30, endpoint=True)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[4], show_ylabel=True)
pdpx, pdpy, ignored = \
plot_stratpd(X, y, colname, 'price', ax=axes[5],
pdp_marker_size=6,
show_x_counts=False,
hide_top_right_axes=False,
show_xlabel=True, show_ylabel=False)
print(f"StratPD ignored {ignored} records")
axes[5].yaxis.tick_right()
axes[5].yaxis.set_label_position('right')
axes[5].set_ylim(-250,2250)
axes[5].set_yticks([0,1000,2000])
axes[5].set_ylabel("price")
savefig(f"{colname}_vs_price")
def tune_RF(X, y, verbose=2):
tuned_parameters = {'n_estimators': [50, 100, 125, 150, 200],
'min_samples_leaf': [1, 3, 5, 7],
'max_features': [.1, .3, .5, .7, .9]}
grid = GridSearchCV(
RandomForestRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=verbose
)
grid.fit(X, y) # does CV on entire data set
rf = grid.best_estimator_
print("RF best:", grid.best_params_)
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# rf.fit(X_train, y_train)
# print("validation R^2", rf.score(X_test, y_test))
return rf, grid.best_params_
def plot_with_noise_col(df, colname):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_noise = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_noise']
type = "noise"
fig, axes = plt.subplots(2, 2, figsize=(5, 5), sharey=True, sharex=True)
df = df.copy()
addnoise(df, n=1, c=50, prefix=colname + '_')
X = df[features]
y = df['price']
# STRATPD ON ROW 1
X = df[features]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15, show_xlabel=True,
show_ylabel=False)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_noise]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15,
show_ylabel=False)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x, pdp_curve = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(2):
axes[i, j].set_xlim(0, 6)
X = df[features_with_noise]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x_, pdp_curve_ = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True,
show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def plot_with_dup_col(df, colname, min_samples_leaf):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_dup = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_dup']
fig, axes = plt.subplots(2, 3, figsize=(7.5, 5), sharey=True, sharex=True)
type = "dup"
verbose = False
df = df.copy()
df[colname + '_dup'] = df[colname]
# df_rent[colname+'_dupdup'] = df_rent[colname]
# STRATPD ON ROW 1
X = df[features]
y = df['price']
print(f"shape is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15,
show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=True,
verbose=verbose)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_dup]
y = df['price']
print(f"shape with dup is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15, show_ylabel=False,
min_samples_leaf=min_samples_leaf,
verbose=verbose)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 2], slope_line_alpha=.15, show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=False,
n_trees=15,
max_features=1,
bootstrap=False,
verbose=verbose
)
axes[0, 2].set_ylim(-1000, 5000)
axes[0, 2].set_title(f"StratPD w/{type} col")
axes[0, 2].text(.2, 4000, "ntrees=15")
axes[0, 2].text(.2, 3500, "max features per split=1")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(3):
axes[i, j].set_xlim(0, 6)
# with dup'd column
X = df[features_with_dup]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True, show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[1, 2].set_title(f"FPD/ICE w/{type} col")
axes[1, 2].text(.2, 4000, "Cannot compensate")
axes[1, 2].set_xlabel(colname)
# print(f"max curve {np.max(curve):.0f}, max curve with dup {np.max(curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def rent_ntrees():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
trees = [1, 5, 10, 30]
supervised = True
def onevar(colname, row, yrange=None):
alphas = [.1,.08,.05,.04]
for i, t in enumerate(trees):
plot_stratpd(X, y, colname, 'price', ax=axes[row, i], slope_line_alpha=alphas[i],
# min_samples_leaf=20,
yrange=yrange,
supervised=supervised,
show_ylabel=t == 1,
pdp_marker_size=2 if row==2 else 8,
n_trees=t,
max_features='auto',
bootstrap=True,
verbose=False)
fig, axes = plt.subplots(3, 4, figsize=(8, 6), sharey=True)
for i in range(1, 4):
axes[0, i].get_yaxis().set_visible(False)
axes[1, i].get_yaxis().set_visible(False)
axes[2, i].get_yaxis().set_visible(False)
for i in range(0, 4):
axes[0, i].set_title(f"{trees[i]} trees")
onevar('bedrooms', row=0, yrange=(-500, 4000))
onevar('bathrooms', row=1, yrange=(-500, 4000))
onevar('latitude', row=2, yrange=(-500, 4000))
savefig(f"rent_ntrees")
plt.close()
def meta_boston():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
boston = load_boston()
print(len(boston.data))
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
X = df.drop('MEDV', axis=1)
y = df['MEDV']
plot_stratpd_gridsearch(X, y, 'AGE', 'MEDV',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
yrange=(-10,10))
# yranges = [(-30, 0), (0, 30), (-8, 8), (-11, 0)]
# for nbins in range(6):
# plot_meta_multivar(X, y, colnames=['LSTAT', 'RM', 'CRIM', 'DIS'], targetname='MEDV',
# nbins=nbins,
# yranges=yranges)
savefig(f"meta_boston_age_medv")
def plot_meta_multivar(X, y, colnames, targetname, nbins, yranges=None):
np.random.seed(1) # pick seed for reproducible article images
min_samples_leaf_values = [2, 5, 10, 30, 50, 100, 200]
nrows = len(colnames)
ncols = len(min_samples_leaf_values)
fig, axes = plt.subplots(nrows, ncols + 2, figsize=((ncols + 2) * 2.5, nrows * 2.5))
if yranges is None:
yranges = [None] * len(colnames)
row = 0
for i, colname in enumerate(colnames):
marginal_plot_(X, y, colname, targetname, ax=axes[row, 0])
col = 2
for msl in min_samples_leaf_values:
print(
f"---------- min_samples_leaf={msl}, nbins={nbins:.2f} ----------- ")
plot_stratpd(X, y, colname, targetname, ax=axes[row, col],
min_samples_leaf=msl,
yrange=yranges[i],
n_trees=1)
axes[row, col].set_title(
f"leafsz={msl}, nbins={nbins:.2f}",
fontsize=9)
col += 1
row += 1
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
row = 0
for i, colname in enumerate(colnames):
ice = predict_ice(rf, X, colname, targetname)
plot_ice(ice, colname, targetname, ax=axes[row, 1])
row += 1
def unsup_rent():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
fig, axes = plt.subplots(4, 2, figsize=(4, 8))
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 0], yrange=(-500,2000),
slope_line_alpha=.2, supervised=False, verbose=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 1], yrange=(-500,2000),
slope_line_alpha=.2, supervised=True, verbose=True)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 0], yrange=(-500,500),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 1], yrange=(-500,500),
slope_line_alpha=.2, supervised=True)
axes[0, 0].set_title("Unsupervised")
axes[0, 1].set_title("Supervised")
for i in range(3):
axes[i, 1].get_yaxis().set_visible(False)
savefig(f"rent_unsup")
plt.close()
def weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
TUNE_RF = False
df_raw = toy_weather_data()
df = df_raw.copy()
df_string_to_cat(df)
names = np.unique(df['state'])
catnames = OrderedDict()
for i,v in enumerate(names):
catnames[i+1] = v
df_cat_to_catcode(df)
X = df.drop('temperature', axis=1)
y = df['temperature']
# cats = catencoders['state'].values
# cats = np.insert(cats, 0, None) # prepend a None for catcode 0
if TUNE_RF:
rf, bestparams = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 5, 'n_estimators': 150}
# validation R^2 0.9500072628270099
else:
rf = RandomForestRegressor(n_estimators=150, min_samples_leaf=5, max_features=0.9, oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize)
df = df_raw.copy()
avgtmp = df.groupby(['state', 'dayofyear'])[['temperature']].mean()
avgtmp = avgtmp.reset_index()
ca = avgtmp.query('state=="CA"')
co = avgtmp.query('state=="CO"')
az = avgtmp.query('state=="AZ"')
wa = avgtmp.query('state=="WA"')
nv = avgtmp.query('state=="NV"')
ax.plot(ca['dayofyear'], ca['temperature'], lw=.5, c='#fdae61', label="CA")
ax.plot(co['dayofyear'], co['temperature'], lw=.5, c='#225ea8', label="CO")
ax.plot(az['dayofyear'], az['temperature'], lw=.5, c='#41b6c4', label="AZ")
ax.plot(wa['dayofyear'], wa['temperature'], lw=.5, c='#a1dab4', label="WA")
ax.plot(nv['dayofyear'], nv['temperature'], lw=.5, c='#a1dab4', label="NV")
ax.legend(loc='upper left', borderpad=0, labelspacing=0)
ax.set_xlabel("dayofyear")
ax.set_ylabel("temperature")
ax.set_title("(a) State/day vs temp")
savefig(f"dayofyear_vs_temp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'dayofyear', 'temperature', ax=ax,
show_x_counts=False,
yrange=(-10, 10),
pdp_marker_size=2, slope_line_alpha=.5, n_trials=1)
ax.set_title("(b) StratPD")
savefig(f"dayofyear_vs_temp_stratpd")
plt.close()
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_catstratpd(X, y, 'state', 'temperature', catnames=catnames,
show_x_counts=False,
# min_samples_leaf=30,
min_y_shifted_to_zero=True,
# alpha=.3,
ax=ax,
yrange=(-1, 55))
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(d) CatStratPD")
savefig(f"state_vs_temp_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_ice(rf, X, 'dayofyear', 'temperature')
plot_ice(ice, 'dayofyear', 'temperature', ax=ax)
ax.set_title("(c) FPD/ICE")
savefig(f"dayofyear_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_catice(rf, X, 'state', 'temperature')
plot_catice(ice, 'state', 'temperature', catnames=catnames, ax=ax,
pdp_marker_size=15,
min_y_shifted_to_zero = True,
yrange=(-2, 50)
)
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(b) FPD/ICE")
savefig(f"state_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.scatter(X['state'], y, alpha=.05, s=15)
ax.set_xticks(range(1,len(catnames)+1))
ax.set_xticklabels(catnames.values())
ax.set_xlabel("state")
ax.set_ylabel("temperature")
ax.set_title("(a) Marginal")
savefig(f"state_vs_temp")
plt.close()
def meta_weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
# np.random.seed(66)
nyears = 5
years = []
for y in range(1980, 1980 + nyears):
df_ = toy_weather_data()
df_['year'] = y
years.append(df_)
df_raw = pd.concat(years, axis=0)
# df_raw.drop('year', axis=1, inplace=True)
df = df_raw.copy()
print(df.head(5))
names = {'CO': 5, 'CA': 10, 'AZ': 15, 'WA': 20}
df['state'] = df['state'].map(names)
catnames = {v:k for k,v in names.items()}
X = df.drop('temperature', axis=1)
y = df['temperature']
plot_catstratpd_gridsearch(X, y, 'state', 'temp',
min_samples_leaf_values=[2, 5, 20, 40, 60],
catnames=catnames,
yrange=(-5,60),
cellwidth=2
)
savefig(f"state_temp_meta")
plot_stratpd_gridsearch(X, y, 'dayofyear', 'temp',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
yrange=(-10,10),
slope_line_alpha=.15)
savefig(f"dayofyear_temp_meta")
def weight():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(2000)
TUNE_RF = False
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'education', 'weight', ax=ax,
show_x_counts=False,
pdp_marker_size=5,
yrange=(-12, 0.05), slope_line_alpha=.1, show_ylabel=True)
# ax.get_yaxis().set_visible(False)
ax.set_title("StratPD", fontsize=10)
ax.set_xlim(10,18)
ax.set_xticks([10,12,14,16,18])
savefig(f"education_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'height', 'weight', ax=ax,
pdp_marker_size=.2,
show_x_counts=False,
yrange=(0, 160), show_ylabel=False)
# ax.get_yaxis().set_visible(False)
ax.set_title("StratPD", fontsize=10)
ax.set_xticks([60,65,70,75])
savefig(f"height_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=(1.3,2))
plot_catstratpd(X, y, 'sex', 'weight', ax=ax,
show_x_counts=False,
catnames={0:'M',1:'F'},
yrange=(-1, 35),
)
ax.set_title("CatStratPD", fontsize=10)
savefig(f"sex_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=(1.5,1.8))
plot_catstratpd(X, y, 'pregnant', 'weight', ax=ax,
show_x_counts=False,
catnames={0:False, 1:True},
yrange=(-1, 45),
)
ax.set_title("CatStratPD", fontsize=10)
savefig(f"pregnant_vs_weight_stratpd")
if TUNE_RF:
rf, bestparams = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 200}
# validation R^2 0.9996343699640691
else:
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1, max_features=0.9, oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
# show pregnant female at max range drops going taller
X_test = np.array([[1, 1, 70, 10]])
y_pred = rf.predict(X_test)
print("pregnant female at max range", X_test, "predicts", y_pred)
X_test = np.array([[1, 1, 72, 10]]) # make them taller
y_pred = rf.predict(X_test)
print("pregnant female in male height range", X_test, "predicts", y_pred)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_ice(rf, X, 'education', 'weight')
plot_ice(ice, 'education', 'weight', ax=ax, yrange=(-12, 0), min_y_shifted_to_zero=True)
ax.set_xlim(10,18)
ax.set_xticks([10,12,14,16,18])
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"education_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(2.4, 2.2))
ice = predict_ice(rf, X, 'height', 'weight')
plot_ice(ice, 'height', 'weight', ax=ax, pdp_linewidth=2, yrange=(100, 250),
min_y_shifted_to_zero=False)
ax.set_xlabel("height\n(a)", fontsize=10)
ax.set_ylabel("weight", fontsize=10)
ax.set_title("FPD/ICE", fontsize=10)
ax.set_xticks([60,65,70,75])
savefig(f"height_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(1.3,2))
ice = predict_catice(rf, X, 'sex', 'weight')
plot_catice(ice, 'sex', 'weight', catnames={0:'M',1:'F'}, ax=ax, yrange=(0, 35),
pdp_marker_size=15)
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"sex_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(1.3,1.8))
ice = predict_catice(rf, X, 'pregnant', 'weight', cats=df_raw['pregnant'].unique())
plot_catice(ice, 'pregnant', 'weight', catnames={0:False, 1:True}, ax=ax,
min_y_shifted_to_zero=True,
yrange=(-5, 45), pdp_marker_size=20)
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"pregnant_vs_weight_pdp")
def shap_pregnant():
np.random.seed(1) # pick seed for reproducible article images
n = 2000
shap_test_size = 300
X, y, df_raw, eqn = toy_weight_data(n=n)
df = df_raw.copy()
df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
# parameters from tune_RF() called in weight()
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1,
max_features=0.9,
oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
GREY = '#444443'
fig, ax = plt.subplots(1, 1, figsize=(1.3,1.8))
preg_shap_values = shap_values[:, 1]
avg_not_preg_weight = np.mean(preg_shap_values[np.where(shap_sample['pregnant']==0)])
avg_preg_weight = np.mean(preg_shap_values[np.where(shap_sample['pregnant']==1)])
ax.bar([0, 1], [avg_not_preg_weight-avg_not_preg_weight, avg_preg_weight-avg_not_preg_weight],
color='#1E88E5')
ax.set_title("SHAP", fontsize=10)
ax.set_xlabel("pregnant")
ax.set_xticks([0,1])
ax.set_xticklabels(['False','True'])
ax.set_ylabel("weight")
ax.set_ylim(-1,45)
ax.set_yticks([0,10,20,30,40])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
savefig('pregnant_vs_weight_shap')
def shap_weight(feature_perturbation, twin=False):
np.random.seed(1) # pick seed for reproducible article images
n = 2000
shap_test_size = 2000
X, y, df_raw, eqn = toy_weight_data(n=n)
df = df_raw.copy()
df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
# parameters from tune_RF() called in weight()
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1,
max_features=0.9,
oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
if feature_perturbation=='interventional':
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100), feature_perturbation='interventional')
xlabel = "height\n(c)"
ylabel = None
yticks = []
figsize = (2.2, 2.2)
else:
explainer = shap.TreeExplainer(rf, feature_perturbation='tree_path_dependent')
xlabel = "height\n(b)"
ylabel = "SHAP height"
yticks = [-75, -60, -40, -20, 0, 20, 40, 60, 75]
figsize = (2.6, 2.2)
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
df_shap = pd.DataFrame()
df_shap['weight'] = shap_values[:, 2]
df_shap['height'] = shap_sample.iloc[:, 2]
# pdpy = df_shap.groupby('height').mean().reset_index()
# print("len pdpy", len(pdpy))
GREY = '#444443'
fig, ax = plt.subplots(1, 1, figsize=figsize)
shap.dependence_plot("height", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=1)
# ax.plot(pdpy['height'], pdpy['weight'], '.', c='k', markersize=.5, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['right'].set_linewidth(.5)
ax.spines['top'].set_linewidth(.5)
ax.set_ylabel(ylabel, fontsize=10, labelpad=0)
ax.set_xlabel(xlabel, fontsize=10)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.plot([70,70], [-75,75], '--', lw=.6, color=GREY)
ax.text(69.8,60, "Max female", horizontalalignment='right',
fontsize=9)
leaf_xranges, leaf_slopes, slope_counts_at_x, dx, slope_at_x, pdpx, pdpy, ignored = \
partial_dependence(X=X, y=y, colname='height')
ax.set_ylim(-77,75)
# ax.set_xlim(min(pdpx), max(pdpx))
ax.set_xticks([60,65,70,75])
ax.set_yticks(yticks)
ax.set_title(f"SHAP {feature_perturbation}", fontsize=10)
# ax.set_ylim(-40,70)
print(min(pdpx), max(pdpx))
print(min(pdpy), max(pdpy))
rise = max(pdpy) - min(pdpy)
run = max(pdpx) - min(pdpx)
slope = rise/run
print(slope)
# ax.plot([min(pdpx),max(pdpyX['height'])], [0,]
if twin:
ax2 = ax.twinx()
# ax2.set_xlim(min(pdpx), max(pdpx))
ax2.set_ylim(min(pdpy)-5, max(pdpy)+5)
ax2.set_xticks([60,65,70,75])
ax2.set_yticks([0,20,40,60,80,100,120,140,150])
# ax2.set_ylabel("weight", fontsize=12)
ax2.plot(pdpx, pdpy, '.', markersize=1, c='k')
# ax2.text(65,25, f"StratPD slope = {slope:.1f}")
ax2.annotate(f"StratPD", (64.65,39), xytext=(66,18),
horizontalalignment='left',
arrowprops=dict(facecolor='black', width=.5, headwidth=5, headlength=5),
fontsize=9)
savefig(f"weight_{feature_perturbation}_shap")
def saledayofweek():
np.random.seed(1) # pick seed for reproducible article images
n = 10_000
shap_test_size = 1000
TUNE_RF = False
X, y = load_bulldozer(n=n)
avgprice = pd.concat([X,y], axis=1).groupby('saledayofweek')[['SalePrice']].mean()
avgprice = avgprice.reset_index()['SalePrice']
print(avgprice)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(range(0,7), avgprice, s=20, c='k')
ax.scatter(X['saledayofweek'], y, s=3, alpha=.1, c='#1E88E5')
# ax.set_xlim(1960,2010)
ax.set_xlabel("saledayofweek\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_saledayofweek_marginal")
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("saledayofweek", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(saledayofweek SHAP)", fontsize=11)
ax.set_xlabel("saledayofweek\n(b)", fontsize=11)
# ax.set_xlim(1960, 2010)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_saledayofweek_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_catstratpd(X, y, colname='saledayofweek', targetname='SalePrice',
catnames={0:'M',1:'T',2:'W',3:'R',4:'F',5:'S',6:'S'},
n_trials=1,
bootstrap=True,
show_x_counts=True,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("StratPD", fontsize=13)
ax.set_xlabel("saledayofweek\n(d)", fontsize=11)
# ax.set_xlim(1960,2010)
# ax.set_ylim(-10000,30_000)
savefig(f"bulldozer_saledayofweek_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "saledayofweek", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "saledayofweek", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
# yrange=(-10000,30_000),
min_y_shifted_to_zero=True)
# ax.set_xlim(1960, 2010)
savefig(f"bulldozer_saledayofweek_pdp")
def productsize():
np.random.seed(1) # pick seed for reproducible article images
shap_test_size = 1000
TUNE_RF = False
# reuse same data generated by gencsv.py for bulldozer to
# make same comparison.
df = pd.read_csv("bulldozer20k.csv")
X = df.drop('SalePrice', axis=1)
y = df['SalePrice']
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['ProductSize'], y, s=3, alpha=.1, c='#1E88E5')
# ax.set_xlim(1960,2010)
ax.set_xlabel("ProductSize\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_ProductSize_marginal")
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
# SHAP
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("ProductSize", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("(b) SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(ProductSize SHAP)", fontsize=11)
ax.set_xlabel("ProductSize", fontsize=11)
# ax.set_xlim(1960, 2010)
ax.set_ylim(-15000,40_000)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_ProductSize_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='ProductSize', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_slope_lines=False,
show_x_counts=False,
show_xlabel=False,
show_impact=False,
show_all_pdp=False,
pdp_marker_size=10,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("(d) StratPD", fontsize=13)
ax.set_xlabel("ProductSize", fontsize=11)
ax.set_xlim(0, 5)
ax.set_ylim(-15000,40_000)
savefig(f"bulldozer_ProductSize_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "ProductSize", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "ProductSize", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
# yrange=(-10000,30_000),
min_y_shifted_to_zero=True)
# ax.set_xlim(1960, 2010)
ax.set_ylim(-15000,40_000)
ax.set_title("(a) FPD/ICE plot", fontsize=13)
savefig(f"bulldozer_ProductSize_pdp")
def saledayofyear():
np.random.seed(1) # pick seed for reproducible article images
n = 10_000
shap_test_size = 1000
TUNE_RF = False
X, y = load_bulldozer(n=n)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['saledayofyear'], y, s=3, alpha=.1, c='#1E88E5')
# ax.set_xlim(1960,2010)
ax.set_xlabel("saledayofyear\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_saledayofyear_marginal")
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("saledayofyear", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(saledayofyear SHAP)", fontsize=11)
ax.set_xlabel("saledayofyear\n(b)", fontsize=11)
# ax.set_xlim(1960, 2010)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_saledayofyear_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='saledayofyear', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_all_pdp=False,
show_slope_lines=False,
show_x_counts=True,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("StratPD", fontsize=13)
ax.set_xlabel("saledayofyear\n(d)", fontsize=11)
# ax.set_xlim(1960,2010)
# ax.set_ylim(-10000,30_000)
savefig(f"bulldozer_saledayofyear_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "saledayofyear", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "saledayofyear", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
# yrange=(-10000,30_000),
min_y_shifted_to_zero=True)
# ax.set_xlim(1960, 2010)
savefig(f"bulldozer_saledayofyear_pdp")
def yearmade():
np.random.seed(1) # pick seed for reproducible article images
n = 20_000
shap_test_size = 1000
TUNE_RF = False
# X, y = load_bulldozer(n=n)
# reuse same data generated by gencsv.py for bulldozer to
# make same comparison.
df = pd.read_csv("bulldozer20k.csv")
X = df.drop('SalePrice', axis=1)
y = df['SalePrice']
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['YearMade'], y, s=3, alpha=.1, c='#1E88E5')
ax.set_xlim(1960,2010)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("(a) Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_YearMade_marginal")
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("YearMade", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.yaxis.label.set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("(b) SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(YearMade SHAP)", fontsize=11)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960, 2010)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_YearMade_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='YearMade', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_slope_lines=False,
show_x_counts=True,
show_ylabel=False,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("(d) StratPD", fontsize=13)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960,2010)
ax.set_ylim(-5000,30_000)
savefig(f"bulldozer_YearMade_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "YearMade", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "YearMade", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
yrange=(20_000,55_000))
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960, 2010)
ax.set_title("(a) FPD/ICE plot", fontsize=13)
savefig(f"bulldozer_YearMade_pdp")
def MachineHours():
np.random.seed(1) # pick seed for reproducible article images
shap_test_size = 1000
TUNE_RF = False
# reuse same data generated by gencsv.py for bulldozer to
# make same comparison.
df = pd.read_csv("bulldozer20k.csv")
# DROP RECORDS WITH MISSING MachineHours VALUES
# df = df[df['MachineHours']!=3138]
X = df.drop('SalePrice', axis=1)
y = df['SalePrice']
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['MachineHours'], y, s=3, alpha=.1, c='#1E88E5')
ax.set_xlim(0,30_000)
ax.set_xlabel("MachineHours\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_MachineHours_marginal")
# SHAP
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("MachineHours", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.yaxis.label.set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("SHAP", fontsize=13)
ax.set_ylabel("SHAP MachineHours", fontsize=11)
ax.set_xlabel("MachineHours\n(b)", fontsize=11)
ax.set_xlim(0,30_000)
ax.set_ylim(-3000,5000)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_MachineHours_shap")
# STRATPD
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='MachineHours', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_all_pdp=False,
show_slope_lines=False,
show_x_counts=True,
barchar_alpha=1.0,
barchar_color='k',
show_ylabel=False,
show_xlabel=False,
show_impact=False,
pdp_marker_size=1,
pdp_marker_alpha=.3,
ax=ax
)
# ax.annotate("Imputed median value", xytext=(10000,-5300),
# xy=(3138,-5200), fontsize=9,
# arrowprops={'arrowstyle':"->"})
ax.yaxis.label.set_visible(False)
ax.set_title("StratPD", fontsize=13)
ax.set_xlim(0,30_000)
ax.set_xlabel("MachineHours\n(d)", fontsize=11)
ax.set_ylim(-6500,2_000)
savefig(f"bulldozer_MachineHours_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "MachineHours", 'SalePrice', numx=300, nlines=200)
plot_ice(ice, "MachineHours", 'SalePrice', alpha=.5, ax=ax,
show_ylabel=True,
yrange=(33_000,38_000)
)
ax.set_xlabel("MachineHours\n(a)", fontsize=11)
ax.set_title("FPD/ICE plot", fontsize=13)
ax.set_xlim(0,30_000)
savefig(f"bulldozer_MachineHours_pdp")
def unsup_yearmade():
np.random.seed(1) # pick seed for reproducible article images
n = 10_000
X, y = load_bulldozer(n=n)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='YearMade', targetname='SalePrice',
n_trials=1,
bootstrap=True,
show_slope_lines=False,
show_x_counts=True,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax,
supervised=False
)
ax.set_title("Unsupervised StratPD", fontsize=13)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960,2010)
ax.set_ylim(-10000,30_000)
savefig(f"bulldozer_YearMade_stratpd_unsup")
def unsup_weight():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(2000)
df = df_raw.copy()
catencoders = df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
fig, axes = plt.subplots(2, 2, figsize=(4, 4))
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 0],
show_x_counts=False,
yrange=(-13, 0), slope_line_alpha=.1, supervised=False)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 1],
show_x_counts=False,
yrange=(-13, 0), slope_line_alpha=.1, supervised=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 0],
show_x_counts=False,
catnames=df_raw['pregnant'].unique(),
yrange=(-5, 45))
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 1],
show_x_counts=False,
catnames=df_raw['pregnant'].unique(),
yrange=(-5, 45))
axes[0, 0].set_title("Unsupervised")
axes[0, 1].set_title("Supervised")
axes[0, 1].get_yaxis().set_visible(False)
axes[1, 1].get_yaxis().set_visible(False)
savefig(f"weight_unsup")
plt.close()
def weight_ntrees():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(1000)
df = df_raw.copy()
catencoders = df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
trees = [1, 5, 10, 30]
fig, axes = plt.subplots(2, 4, figsize=(8, 4))
for i in range(1, 4):
axes[0, i].get_yaxis().set_visible(False)
axes[1, i].get_yaxis().set_visible(False)
for i in range(0, 4):
axes[0, i].set_title(f"{trees[i]} trees")
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 0],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.1, pdp_marker_size=10, show_ylabel=True,
n_trees=1, max_features=1.0, bootstrap=False)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 1],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.1, pdp_marker_size=10, show_ylabel=False,
n_trees=5, max_features='auto', bootstrap=True)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 2],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.08, pdp_marker_size=10, show_ylabel=False,
n_trees=10, max_features='auto', bootstrap=True)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 3],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.05, pdp_marker_size=10, show_ylabel=False,
n_trees=30, max_features='auto', bootstrap=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 0],
catnames={0:False, 1:True}, show_ylabel=True,
yrange=(0, 35),
n_trees=1, max_features=1.0, bootstrap=False)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 1],
catnames={0:False, 1:True}, show_ylabel=False,
yrange=(0, 35),
n_trees=5, max_features='auto', bootstrap=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 2],
catnames={0:False, 1:True}, show_ylabel=False,
yrange=(0, 35),
n_trees=10, max_features='auto', bootstrap=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 3],
catnames={0:False, 1:True}, show_ylabel=False,
yrange=(0, 35),
n_trees=30, max_features='auto', bootstrap=True)
savefig(f"education_pregnant_vs_weight_ntrees")
plt.close()
def meta_weight():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(1000)
df = df_raw.copy()
catencoders = df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
plot_stratpd_gridsearch(X, y, colname='education', targetname='weight',
show_slope_lines=True,
xrange=(10,18),
yrange=(-12,0))
savefig("education_weight_meta")
plot_stratpd_gridsearch(X, y, colname='height', targetname='weight', yrange=(0,150),
show_slope_lines=True)
savefig("height_weight_meta")
def noisy_poly_data(n, sd=1.0):
x1 = np.random.uniform(-2, 2, size=n)
x2 = np.random.uniform(-2, 2, size=n)
y = x1 ** 2 + x2 + 10 + np.random.normal(0, sd, size=n)
df = pd.DataFrame()
df['x1'] = x1
df['x2'] = x2
df['y'] = y
return df
def noise():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
n = 1000
fig, axes = plt.subplots(1, 4, figsize=(8, 2), sharey=True)
sds = [0,.5,1,2]
for i,sd in enumerate(sds):
df = noisy_poly_data(n=n, sd=sd)
X = df.drop('y', axis=1)
y = df['y']
plot_stratpd(X, y, 'x1', 'y',
show_ylabel=False,
pdp_marker_size=1,
show_x_counts=False,
ax=axes[i], yrange=(-4, .5))
axes[0].set_ylabel("y", fontsize=12)
for i,(ax,which) in enumerate(zip(axes,['(a)','(b)','(c)','(d)'])):
ax.text(0, -1, f"{which}\n$\sigma = {sds[i]}$", horizontalalignment='center')
ax.set_xlabel('$x_1$', fontsize=12)
ax.set_xticks([-2,-1,0,1,2])
savefig(f"noise")
def meta_noise():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
n = 1000
noises = [0, .5, .8, 1.0]
sizes = [2, 10, 30, 50]
fig, axes = plt.subplots(len(noises), len(sizes), figsize=(7, 6), sharey=True,
sharex=True)
row = 0
for sd in noises:
df = noisy_poly_data(n=n, sd=sd)
X = df.drop('y', axis=1)
y = df['y']
col = 0
for s in sizes:
if row == 3:
show_xlabel = True
else:
show_xlabel = False
print(f"------------------- noise {sd}, SIZE {s} --------------------")
if col > 1: axes[row, col].get_yaxis().set_visible(False)
plot_stratpd(X, y, 'x1', 'y', ax=axes[row, col],
show_x_counts=False,
min_samples_leaf=s,
yrange=(-3.5, .5),
pdp_marker_size=1,
show_ylabel=False,
show_xlabel=show_xlabel)
if col == 0:
axes[row, col].set_ylabel(f'$y, \epsilon \sim N(0,{sd:.2f})$')
if row == 0:
axes[row, col].set_title("Min $x_{\\overline{c}}$ leaf " + f"{s}",
fontsize=12)
col += 1
row += 1
lastrow = len(noises)
# row = 0
# for sd in noises:
# axes[row, 0].scatter(X['x1'], y, slope_line_alpha=.12, label=None)
# axes[row, 0].set_xlabel("x1")
# axes[row, 0].set_ylabel("y")
# axes[row, 0].set_ylim(-5, 5)
# axes[row, 0].set_title(f"$y = x_1^2 + x_2 + \epsilon$, $\epsilon \sim N(0,{sd:.2f})$")
# row += 1
# axes[lastrow, 0].set_ylabel(f'$y$ vs $x_c$ partition')
# col = 0
# for s in sizes:
# rtreeviz_univar(axes[lastrow, col],
# X['x2'], y,
# min_samples_leaf=s,
# feature_name='x2',
# target_name='y',
# fontsize=10, show={'splits'},
# split_linewidth=.5,
# markersize=5)
# axes[lastrow, col].set_xlabel("x2")
# col += 1
savefig(f"meta_additivity_noise")
def bigX_data(n):
x1 = np.random.uniform(-1, 1, size=n)
x2 = np.random.uniform(-1, 1, size=n)
x3 = np.random.uniform(-1, 1, size=n)
y = 0.2 * x1 - 5 * x2 + 10 * x2 * np.where(x3 >= 0, 1, 0) + np.random.normal(0, 1,
size=n)
df = pd.DataFrame()
df['x1'] = x1
df['x2'] = x2
df['x3'] = x3
df['y'] = y
return df
def bigX():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
n = 1000
df = bigX_data(n=n)
X = df.drop('y', axis=1)
y = df['y']
# plot_stratpd_gridsearch(X, y, 'x2', 'y',
# min_samples_leaf_values=[2,5,10,20,30],
# # nbins_values=[1,3,5,6,10],
# yrange=(-4,4))
#
# plt.tight_layout()
# plt.show()
# return
# The partial derivative is just 0.2, so this is correct: a flat derivative curve and a net-effect line at slope .2
# The ICE curve is far too shallow and is not even a line, even at n=1000
fig, axes = plt.subplots(2, 2, figsize=(4, 4), sharey=True)
# The partial derivative wrt x2 is -5, plus 10 about half the time, so roughly 0 on average.
# We should not expect a criss-cross like ICE shows, since the derivative of 1_{x3>=0} is 0 everywhere
# wrt any x, even x3. x2 *is* affecting y, BUT the net effect at any given spot
# is what we care about, and that's 0. Just because the marginal x2-vs-y plot looks non-
# random doesn't mean that x2's net effect is nonzero. We are trying to
# strip away x1's/x3's effect upon y. When we do, x2 has no effect on y.
# Ask: what is the net effect at every x2? 0.
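# Added sketch (not part of the original script): a quick finite-difference check of the
# reasoning above on the noiseless surface y = 0.2*x1 - 5*x2 + 10*x2*1_{x3>=0}.
# The partial derivative wrt x2 is -5 + 10*1_{x3>=0}, which averages to ~0 for x3 ~ U(-1,1).
surface = lambda d: 0.2 * d['x1'] - 5 * d['x2'] + 10 * d['x2'] * np.where(d['x3'] >= 0, 1, 0)
eps = 1e-4
X_eps = X.copy()
X_eps['x2'] = X_eps['x2'] + eps
print("avg dy/dx2 =", np.mean((surface(X_eps) - surface(X)) / eps))  # expect ~0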
plot_stratpd(X, y, 'x2', 'y', ax=axes[0, 0], yrange=(-4, 4),
min_samples_leaf=5,
pdp_marker_size=2)
# Partial deriv wrt x3 of 1_x3>=0 is 0 everywhere so result must be 0
plot_stratpd(X, y, 'x3', 'y', ax=axes[1, 0], yrange=(-4, 4),
min_samples_leaf=5,
pdp_marker_size=2)
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print(f"RF OOB {rf.oob_score_}")
ice = predict_ice(rf, X, 'x2', 'y', numx=100)
plot_ice(ice, 'x2', 'y', ax=axes[0, 1], yrange=(-4, 4))
ice = predict_ice(rf, X, 'x3', 'y', numx=100)
plot_ice(ice, 'x3', 'y', ax=axes[1, 1], yrange=(-4, 4))
axes[0, 1].get_yaxis().set_visible(False)
axes[1, 1].get_yaxis().set_visible(False)
axes[0, 0].set_title("StratPD", fontsize=10)
axes[0, 1].set_title("FPD/ICE", fontsize=10)
savefig(f"bigx")
plt.close()
def unsup_boston():
np.random.seed(1) # pick seed for reproducible article images
# np.random.seed(42)
print(f"----------- {inspect.stack()[0][3]} -----------")
boston = load_boston()
print(len(boston.data))
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
X = df.drop('MEDV', axis=1)
y = df['MEDV']
fig, axes = plt.subplots(1, 4, figsize=(9, 2))
axes[0].scatter(df['AGE'], y, s=5, alpha=.7)
axes[0].set_ylabel('MEDV')
axes[0].set_xlabel('AGE')
axes[0].set_title("Marginal")
axes[1].set_title("Unsupervised StratPD")
axes[2].set_title("Supervised StratPD")
axes[3].set_title("FPD/ICE")
plot_stratpd(X, y, 'AGE', 'MEDV', ax=axes[1], yrange=(-20, 20),
n_trees=20,
bootstrap=True,
# min_samples_leaf=10,
max_features='auto',
supervised=False, show_ylabel=False,
verbose=True,
slope_line_alpha=.1)
plot_stratpd(X, y, 'AGE', 'MEDV', ax=axes[2], yrange=(-20, 20),
min_samples_leaf=5,
n_trees=1,
supervised=True, show_ylabel=False)
axes[1].text(5, 15, f"20 trees, bootstrap")
axes[2].text(5, 15, f"1 tree, no bootstrap")
rf = RandomForestRegressor(n_estimators=100, oob_score=True)
rf.fit(X, y)
print(f"RF OOB {rf.oob_score_}")
ice = predict_ice(rf, X, 'AGE', 'MEDV', numx=10)
plot_ice(ice, 'AGE', 'MEDV', ax=axes[3], yrange=(-20, 20), show_ylabel=False)
# axes[0,1].get_yaxis().set_visible(False)
# axes[1,1].get_yaxis().set_visible(False)
savefig(f"boston_unsup")
# plt.tight_layout()
# plt.show()
def lm_plot(X, y, colname, targetname, ax=None):
ax.scatter(X[colname], y, alpha=.12, label=None)
ax.set_xlabel(colname)
ax.set_ylabel(targetname)
col = X[colname]
# y_pred_hp = r_col.predict(col.values.reshape(-1, 1))
# ax.plot(col, y_pred_hp, ":", linewidth=1, c='red', label='y ~ horsepower')
r = LinearRegression()
r.fit(X[['horsepower', 'weight']], y)
xcol = np.linspace(np.min(col), np.max(col), num=100)
ci = 0 if colname == 'horsepower' else 1
# use beta from y ~ hp + weight
# ax.plot(xcol, xcol * r.coef_[ci] + r.intercept_, linewidth=1, c='orange')
# ax.text(min(xcol)*1.02, max(y)*.95, f"$\\beta_{{{colname}}}$={r.coef_[ci]:.3f}")
# r = LinearRegression()
# r.fit(X[['horsepower','weight']], y)
# xcol = np.linspace(np.min(col), np.max(col), num=100)
# ci = X.columns.get_loc(colname)
# # ax.plot(xcol, xcol * r.coef_[ci] + r_col.intercept_, linewidth=1, c='orange', label=f"$\\beta_{{{colname}}}$")
# left40 = xcol[int(len(xcol) * .4)]
# ax.text(min(xcol), max(y)*.94, f"$\hat{{y}} = \\beta_0 + \\beta_1 x_{{horsepower}} + \\beta_2 x_{{weight}}$")
# i = 1 if colname=='horsepower' else 2
# # ax.text(left40, left40*r.coef_[ci] + r_col.intercept_, f"$\\beta_{i}$={r.coef_[ci]:.3f}")
def cars():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
df_cars = pd.read_csv("../notebooks/data/auto-mpg.csv")
df_cars = df_cars[df_cars['horsepower'] != '?'] # drop the few missing values
df_cars['horsepower'] = df_cars['horsepower'].astype(float)
X = df_cars[['horsepower', 'weight']]
y = df_cars['mpg']
fig, axes = plt.subplots(2, 3, figsize=(9, 4))
lm_plot(X, y, 'horsepower', 'mpg', ax=axes[0, 0])
lm_plot(X, y, 'weight', 'mpg', ax=axes[1, 0])
plot_stratpd(X, y, 'horsepower', 'mpg', ax=axes[0, 1],
min_samples_leaf=10,
xrange=(45, 235), yrange=(-20, 20), show_ylabel=False)
plot_stratpd(X, y, 'weight', 'mpg', ax=axes[1, 1],
min_samples_leaf=10,
xrange=(1600, 5200), yrange=(-20, 20), show_ylabel=False)
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
ice = predict_ice(rf, X, 'horsepower', 'mpg', numx=100)
plot_ice(ice, 'horsepower', 'mpg', ax=axes[0, 2], yrange=(-20, 20), show_ylabel=False)
ice = predict_ice(rf, X, 'weight', 'mpg', numx=100)
plot_ice(ice, 'weight', 'mpg', ax=axes[1, 2], yrange=(-20, 20), show_ylabel=False)
# draw regr line for horsepower
r = LinearRegression()
r.fit(X, y)
colname = 'horsepower'
col = X[colname]
xcol = np.linspace(np.min(col), np.max(col), num=100)
ci = X.columns.get_loc(colname)
beta0 = -r.coef_[ci] * min(col) # solved for beta0 to get y-intercept
# axes[0,1].plot(xcol, xcol * r.coef_[ci], linewidth=1, c='orange', label=f"$\\beta_{{{colname}}}$")
# axes[0,2].plot(xcol, xcol * r.coef_[ci], linewidth=1, c='orange', label=f"$\\beta_{{{colname}}}$")
# draw regr line for weight
colname = 'weight'
col = X[colname]
xcol = np.linspace(np.min(col), np.max(col), num=100)
ci = X.columns.get_loc(colname)
beta0 = -r.coef_[ci] * min(col) # solved for beta0 to get y-intercept
# axes[1,1].plot(xcol, xcol * r.coef_[ci]+11, linewidth=1, c='orange', label=f"$\\beta_{{{colname}}}$")
# axes[1,2].plot(xcol, xcol * r.coef_[ci]+13, linewidth=1, c='orange', label=f"$\\beta_{{{colname}}}$")
axes[1, 2].set_xlim(1600, 5200)
savefig("cars")
def meta_cars():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
df_cars = pd.read_csv("../notebooks/data/auto-mpg.csv")
df_cars = df_cars[df_cars['horsepower'] != '?'] # drop the few missing values
df_cars['horsepower'] = df_cars['horsepower'].astype(float)
X = df_cars[['horsepower', 'weight']]
y = df_cars['mpg']
plot_stratpd_gridsearch(X, y, colname='horsepower', targetname='mpg',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
nbins_values=[1,2,3,4,5],
yrange=(-20, 20))
savefig("horsepower_meta")
plot_stratpd_gridsearch(X, y, colname='weight', targetname='mpg',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
nbins_values=[1,2,3,4,5],
yrange=(-20, 20))
savefig("weight_meta")
def multi_joint_distr():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
# np.random.seed(42)
n = 1000
min_samples_leaf = 30
nbins = 2
df = pd.DataFrame(np.random.multivariate_normal([6, 6, 6, 6],
[
[1, 5, .7, 3],
[5, 1, 2, .5],
[.7, 2, 1, 1.5],
[3, .5, 1.5, 1]
],
n),
columns=['x1', 'x2', 'x3', 'x4'])
df['y'] = df['x1'] + df['x2'] + df['x3'] + df['x4']
X = df.drop('y', axis=1)
y = df['y']
r = LinearRegression()
r.fit(X, y)
print(r.coef_) # should be all 1s
yrange = (-2, 15)
fig, axes = plt.subplots(6, 4, figsize=(7.5, 8.5), sharey=False) # , sharex=True)
axes[0, 0].scatter(X['x1'], y, s=5, alpha=.08)
axes[0, 0].set_xlim(0, 13)
axes[0, 0].set_ylim(0, 45)
axes[0, 1].scatter(X['x2'], y, s=5, alpha=.08)
axes[0, 1].set_xlim(0, 13)
axes[0, 1].set_ylim(3, 45)
axes[0, 2].scatter(X['x3'], y, s=5, alpha=.08)
axes[0, 2].set_xlim(0, 13)
axes[0, 2].set_ylim(3, 45)
axes[0, 3].scatter(X['x4'], y, s=5, alpha=.08)
axes[0, 3].set_xlim(0, 13)
axes[0, 3].set_ylim(3, 45)
axes[0, 0].text(1, 38, 'Marginal', horizontalalignment='left')
axes[0, 1].text(1, 38, 'Marginal', horizontalalignment='left')
axes[0, 2].text(1, 38, 'Marginal', horizontalalignment='left')
axes[0, 3].text(1, 38, 'Marginal', horizontalalignment='left')
axes[0, 0].set_ylabel("y")
for i in range(6):
for j in range(1, 4):
axes[i, j].get_yaxis().set_visible(False)
for i in range(6):
for j in range(4):
axes[i, j].set_xlim(0, 15)
pdpx, pdpy, ignored = \
plot_stratpd(X, y, 'x1', 'y', ax=axes[1, 0], xrange=(0, 13),
min_samples_leaf=min_samples_leaf,
yrange=yrange, show_xlabel=False, show_ylabel=True)
r = LinearRegression()
r.fit(pdpx.reshape(-1, 1), pdpy)
axes[1, 0].text(1, 10, f"Slope={r.coef_[0]:.2f}")
pdpx, pdpy, ignored = \
plot_stratpd(X, y, 'x2', 'y', ax=axes[1, 1], xrange=(0, 13),
# show_dx_line=True,
min_samples_leaf=min_samples_leaf,
yrange=yrange, show_xlabel=False, show_ylabel=False)
r = LinearRegression()
r.fit(pdpx.reshape(-1, 1), pdpy)
axes[1, 1].text(1, 10, f"Slope={r.coef_[0]:.2f}")
pdpx, pdpy, ignored = \
plot_stratpd(X, y, 'x3', 'y', ax=axes[1, 2], xrange=(0, 13),
# show_dx_line=True,
min_samples_leaf=min_samples_leaf,
yrange=yrange, show_xlabel=False, show_ylabel=False)
r = LinearRegression()
r.fit(pdpx.reshape(-1, 1), pdpy)
axes[1, 2].text(1, 10, f"Slope={r.coef_[0]:.2f}")
pdpx, pdpy, ignored = \
plot_stratpd(X, y, 'x4', 'y', ax=axes[1, 3], xrange=(0, 13),
# show_dx_line=True,
min_samples_leaf=min_samples_leaf,
yrange=yrange, show_xlabel=False, show_ylabel=False)
r = LinearRegression()
r.fit(pdpx.reshape(-1, 1), pdpy)
axes[1, 3].text(1, 10, f"Slope={r.coef_[0]:.2f}")
axes[1, 0].text(1, 12, 'StratPD', horizontalalignment='left')
axes[1, 1].text(1, 12, 'StratPD', horizontalalignment='left')
axes[1, 2].text(1, 12, 'StratPD', horizontalalignment='left')
axes[1, 3].text(1, 12, 'StratPD', horizontalalignment='left')
# plt.show()
# return
nfeatures = 4
regrs = [
RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True),
svm.SVR(gamma=1 / nfeatures), # gamma='scale'),
LinearRegression(),
KNeighborsRegressor(n_neighbors=5)]
row = 2
for regr in regrs:
regr.fit(X, y)
rname = regr.__class__.__name__
if rname == 'SVR':
rname = "SVM FPD/ICE"
if rname == 'RandomForestRegressor':
rname = "RF FPD/ICE"
if rname == 'LinearRegression':
rname = 'Linear FPD/ICE'
if rname == 'KNeighborsRegressor':
rname = 'kNN FPD/ICE'
show_xlabel = True if row == 5 else False
axes[row, 0].text(.5, 11, rname, horizontalalignment='left')
axes[row, 1].text(.5, 11, rname, horizontalalignment='left')
axes[row, 2].text(.5, 11, rname, horizontalalignment='left')
axes[row, 3].text(.5, 11, rname, horizontalalignment='left')
ice = predict_ice(regr, X, 'x1', 'y')
plot_ice(ice, 'x1', 'y', ax=axes[row, 0], xrange=(0, 13), yrange=yrange,
alpha=.08,
show_xlabel=show_xlabel, show_ylabel=True)
ice = predict_ice(regr, X, 'x2', 'y')
plot_ice(ice, 'x2', 'y', ax=axes[row, 1], xrange=(0, 13), yrange=yrange,
alpha=.08,
show_xlabel=show_xlabel, show_ylabel=False)
ice = predict_ice(regr, X, 'x3', 'y')
plot_ice(ice, 'x3', 'y', ax=axes[row, 2], xrange=(0, 13), yrange=yrange,
alpha=.08,
show_xlabel=show_xlabel, show_ylabel=False)
ice = predict_ice(regr, X, 'x4', 'y')
plot_ice(ice, 'x4', 'y', ax=axes[row, 3], xrange=(0, 13), yrange=yrange,
alpha=.08,
show_xlabel=show_xlabel, show_ylabel=False)
row += 1
# plt.tight_layout()
# plt.show()
savefig("multivar_multimodel_normal")
def interactions():
np.random.seed(1) # pick seed for reproducible article images
n = 2000
df = synthetic_interaction_data(n)
X, y = df[['x1', 'x2', 'x3']].copy(), df['y'].copy()
X1 = X.iloc[:, 0]
X2 = X.iloc[:, 1]
X3 = X.iloc[:, 2] # UNUSED in y
rf = RandomForestRegressor(n_estimators=10, oob_score=True)
rf.fit(X, y)
print("R^2 training", rf.score(X, y))
print("R^2 OOB", rf.oob_score_)
print("mean(y) =", np.mean(y))
print("mean(X_1), mean(X_2) =", np.mean(X1), np.mean(X2))
pdp_x1 = friedman_partial_dependence(rf, X, 'x1', numx=None, mean_centered=False)
pdp_x2 = friedman_partial_dependence(rf, X, 'x2', numx=None, mean_centered=False)
pdp_x3 = friedman_partial_dependence(rf, X, 'x3', numx=None, mean_centered=False)
m1 = np.mean(pdp_x1[1])
m2 = np.mean(pdp_x2[1])
m3 = np.mean(pdp_x3[1])
print("mean(PDP_1) =", np.mean(pdp_x1[1]))
print("mean(PDP_2) =", np.mean(pdp_x2[1]))
print("mean(PDP_3) =", np.mean(pdp_x3[1]))
print("mean abs PDP_1-ybar", np.mean(np.abs(pdp_x1[1] - m1)))
print("mean abs PDP_2-ybar", np.mean(np.abs(pdp_x2[1] - m2)))
print("mean abs PDP_3-ybar", np.mean(np.abs(pdp_x3[1] - m3)))
explainer = shap.TreeExplainer(rf, data=X,
feature_perturbation='interventional')
shap_values = explainer.shap_values(X, check_additivity=False)
shapavg = np.mean(shap_values, axis=0)
print("SHAP avg x1,x2,x3 =", shapavg)
shapimp = np.mean(np.abs(shap_values), axis=0)
print("SHAP avg |x1|,|x2|,|x3| =", shapimp)
fig, axes = plt.subplots(1,4,figsize=(11.33,2.8))
x1_color = '#1E88E5'
x2_color = 'orange'
x3_color = '#A22396'
axes[0].plot(pdp_x1[0], pdp_x1[1], '.', markersize=1, c=x1_color, label='$FPD_1$', alpha=1)
axes[0].plot(pdp_x2[0], pdp_x2[1], '.', markersize=1, c=x2_color, label='$FPD_2$', alpha=1)
axes[0].plot(pdp_x3[0], pdp_x3[1], '.', markersize=1, c=x3_color, label='$FPD_3$', alpha=1)
axes[0].text(0, 75, f"$\\bar{{y}}={np.mean(y):.1f}$", fontsize=13)
axes[0].set_xticks([0,2,4,6,8,10])
axes[0].set_xlabel("$x_1, x_2, x_3$", fontsize=10)
axes[0].set_ylabel("y")
axes[0].set_yticks([0, 25, 50, 75, 100, 125, 150])
axes[0].set_ylim(-10,160)
axes[0].set_title(f"(a) Friedman FPD")
axes[0].spines['top'].set_linewidth(.5)
axes[0].spines['right'].set_linewidth(.5)
axes[0].spines['left'].set_linewidth(.5)
axes[0].spines['bottom'].set_linewidth(.5)
axes[0].spines['top'].set_color('none')
axes[0].spines['right'].set_color('none')
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[0].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=10)
# axes[0].legend(fontsize=10)
#axes[1].plot(shap_values)
shap.dependence_plot("x1", shap_values, X,
interaction_index=None, ax=axes[1], dot_size=4,
show=False, alpha=.5, color=x1_color)
shap.dependence_plot("x2", shap_values, X,
interaction_index=None, ax=axes[1], dot_size=4,
show=False, alpha=.5, color=x2_color)
shap.dependence_plot("x3", shap_values, X,
interaction_index=None, ax=axes[1], dot_size=4,
show=False, alpha=.5, color=x3_color)
axes[1].set_xticks([0,2,4,6,8,10])
axes[1].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
axes[1].set_ylim(-95,110)
axes[1].set_title("(b) SHAP")
axes[1].set_ylabel("SHAP values", fontsize=11)
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[1].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=12)
df_x1 = pd.read_csv("images/x1_ale.csv")
df_x2 = pd.read_csv("images/x2_ale.csv")
df_x3 = pd.read_csv("images/x3_ale.csv")
axes[2].plot(df_x1['x.values'],df_x1['f.values'],'.',color=x1_color,markersize=2)
axes[2].plot(df_x2['x.values'],df_x2['f.values'],'.',color=x2_color,markersize=2)
axes[2].plot(df_x3['x.values'],df_x3['f.values'],'.',color=x3_color,markersize=2)
axes[2].set_title("(c) ALE")
# axes[2].set_ylabel("y", fontsize=12)
axes[2].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
axes[2].set_ylim(-95,110)
# axes[2].tick_params(axis='both', which='major', labelsize=10)
axes[2].set_xticks([0,2,4,6,8,10])
axes[2].spines['top'].set_linewidth(.5)
axes[2].spines['right'].set_linewidth(.5)
axes[2].spines['left'].set_linewidth(.5)
axes[2].spines['bottom'].set_linewidth(.5)
axes[2].spines['top'].set_color('none')
axes[2].spines['right'].set_color('none')
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[2].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=12)
plot_stratpd(X, y, "x1", "y", ax=axes[3], pdp_marker_size=1,
pdp_marker_color=x1_color,
show_x_counts=False, n_trials=1, show_slope_lines=False)
plot_stratpd(X, y, "x2", "y", ax=axes[3], pdp_marker_size=1,
pdp_marker_color=x2_color,
show_x_counts=False, n_trials=1, show_slope_lines=False)
plot_stratpd(X, y, "x3", "y", ax=axes[3], pdp_marker_size=1,
pdp_marker_color=x3_color,
show_x_counts=False, n_trials=1, show_slope_lines=False)
axes[3].set_xticks([0,2,4,6,8,10])
axes[3].set_ylim(-20,160)
axes[3].set_yticks([0, 25, 50, 75, 100, 125, 150])
axes[3].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
# axes[3].set_ylabel("y", fontsize=12)
axes[3].set_title("(d) StratPD")
axes[3].spines['top'].set_linewidth(.5)
axes[3].spines['right'].set_linewidth(.5)
axes[3].spines['left'].set_linewidth(.5)
axes[3].spines['bottom'].set_linewidth(.5)
axes[3].spines['top'].set_color('none')
axes[3].spines['right'].set_color('none')
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[3].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=12)
savefig("interactions")
def gen_ale_plot_data_in_R():
"Exec R and generate images/*.csv files. Then plot with Python"
os.system("R CMD BATCH ale_plots_bulldozer.R")
os.system("R CMD BATCH ale_plots_rent.R")
os.system("R CMD BATCH ale_plots_weather.R")
os.system("R CMD BATCH ale_plots_weight.R")
def ale_yearmade():
df = pd.read_csv("images/YearMade_ale.csv")
# df['f.values'] -= np.min(df['f.values'])
print(df)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.plot(df['x.values'],df['f.values'],'.',color='k',markersize=4)
ax.set_title("(c) ALE", fontsize=13)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960, 2010)
ax.set_ylim(-25000,30000)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
savefig('bulldozer_YearMade_ale')
def ale_MachineHours():
df = pd.read_csv("images/MachineHours_ale.csv")
# df['f.values'] -= np.min(df['f.values'])
print(df)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.plot(df['x.values'],df['f.values'],'.',color='k',markersize=4)
ax.set_title("ALE", fontsize=13)
# ax.set_ylabel("SalePrice", fontsize=11)
ax.set_xlabel("(c) MachineHours", fontsize=11)
ax.set_xlim(0, 30_000)
ax.set_ylim(-3000,5000)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
savefig('bulldozer_MachineHours_ale')
def ale_productsize():
df = pd.read_csv("images/ProductSize_ale.csv")
print(df)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.plot(df['x.values'],df['f.values'],'.',color='k',markersize=10)
ax.set_title("(c) ALE", fontsize=13)
# ax.set_ylabel("SalePrice", fontsize=11)
ax.set_xlabel("ProductSize", fontsize=11)
# ax.set_xlim(0, 30_000)
ax.set_ylim(-15000,40000)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
savefig('bulldozer_ProductSize_ale')
def ale_height():
df = | pd.read_csv("images/height_ale.csv") | pandas.read_csv |
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hh:mm:ss part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual( | ct('100ms') | pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type |
from typing_extensions import Literal
from typing import Optional,Callable,Union,List
import numpy as np
import pandas as pd
import torch
import os
from time import time
import matplotlib.pyplot as plt
from scipy.stats import chi2
from numpy.random import permutation
from sklearn.model_selection import train_test_split
from kernels import gauss_kernel_mediane
from apt.eigen_wrapper import eigsy
import apt.kmeans # For kmeans
from kmeans_pytorch import kmeans
# Choices to make (find the best practices)
# Do I store the Gram matrix or compute it whenever it is needed? I compute the Gram each time but diagonalize it once per setting
# New question: do I keep all the diagonalized matrices in memory or not (if I run tests with Nystrom)?
# Do I create two variables, one for the Gram and one for the Nystrom Gram?
# Answer: at computation time, what needs to be computed is selected automatically
# The Nystrom anchor assignments could be used to compute a residual sum of squares, for example.
# See how plots are handled in scanpy
# Also initialize the mask when drawing figures
# Store the projection dataframes in dicts so that several of them can be kept in memory at the same time.
# By default, plot all the projections in memory
# Plot the spectra
# Plot how the correlations evolve with respect to the ordered genes
# Make everything verbose
# Write the docstring of each function
# Use dicts for sp and ev? For: when we want to plot the spectra.
# Write compute kfdat and proj functions that call the "compute and load" scheduler (to be renamed)
# The name of corr must specify, besides the name, the projection it refers to (PCA or FDA)
# Save the kfda projection coefficient of each PCA axis
# Write a __print__ function (?)
# Compute the p-values per direction
# Gene list
# lsmeans, least squares means (in Franck's course on two-factor ANOVA, p32, adjusted-means test)
# MMD and permutation test
# plot proj: we have dfx and dfy to plot the result as a function of a PCA axis
# We may also want to plot it as a function of a gene's expression
class Tester:
"""
Tester is a class that performs kernel tests such as MMD and the test based on Kernel Fisher Discriminant Analysis (KFDA).
It also provides a range of visualisations based on the discrimination between two groups.
"""
def __init__(self,
x:Union[np.array,torch.tensor]=None,
y:Union[np.array,torch.tensor]=None,
kernel:Callable[[torch.Tensor,torch.Tensor],torch.Tensor]=None,
x_index:List = None,
y_index:List = None,
variables:List = None):
"""\
Parameters
----------
x,y: torch.Tensor or numpy.array of sizes n1 x p and n2 x p
kernel: kernel function to apply on (x,y)
x_index,y_index: pd.Index or list of index to identify observations from x and y
variables: pd.Index or list of index to identify variables from x and y
Returns
-------
:obj:`Tester`
"""
self.has_data = False
# initialized attributes
self.df_kfdat = pd.DataFrame()
self.df_proj_kfda = {}
self.df_proj_kpca = {}
self.corr = {}
self.dict_mmd = {}
if x is not None and y is not None:
self.init_data(x=x,y=y,kernel=kernel,x_index=x_index,y_index=y_index,variables=variables)
def init_data(self,x,y,kernel=None, x_index=None, y_index=None,variables=None):
# Tester works with torch tensor objects
self.x = torch.from_numpy(x).double() if (isinstance(x, np.ndarray)) else x
self.y = torch.from_numpy(y).double() if (isinstance(y, np.ndarray)) else y
self.n1_initial = x.shape[0]
self.n2_initial = y.shape[0]
self.n1 = x.shape[0]
self.n2 = y.shape[0]
# generates range index if no index
self.x_index=pd.Index(range(1,self.n1+1)) if x_index is None else pd.Index(x_index) if isinstance(x_index,list) else x_index
self.y_index=pd.Index(range(self.n1+1,self.n1+self.n2+1)) if y_index is None else pd.Index(y_index) if isinstance(y_index,list) else y_index
self.index = self.x_index.append(self.y_index)
self.variables = range(x.shape[1]) if variables is None else variables
self.xmask = self.x_index.isin(self.x_index)
self.ymask = self.y_index.isin(self.y_index)
self.imask = self.index.isin(self.index)
self.ignored_obs = None
if kernel is None:
self.kernel,self.mediane = gauss_kernel_mediane(x,y,return_mediane=True)
else:
self.kernel = kernel
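# Note (added sketch, not from the original docs): any callable taking two torch tensors
# of shapes (n, p) and (m, p) and returning the (n, m) matrix of pairwise kernel
# evaluations can be passed here, e.g. a linear kernel:
#   linear_kernel = lambda a, b: a @ b.T
#   Tester(x, y, kernel=linear_kernel)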
if self.df_kfdat.empty:
self.df_kfdat = pd.DataFrame(index= list(range(1,self.n1+self.n2)))
self.has_data = True
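# Minimal usage sketch (added illustration; data shapes and anchor count are hypothetical):
#   x = np.random.randn(100, 10)
#   y = np.random.randn(120, 10) + .3
#   test = Tester(x, y)
#   test.compute_nystrom_anchors(nanchors=20, nystrom_method='kmeans')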
def compute_nystrom_anchors(self,nanchors,nystrom_method='kmeans',split_data=False,test_size=.8,on_other_data=False,x_other=None,y_other=None,verbose=0): # max_iter=1000, (for François's kmeans)
"""
Determines the nystrom anchors using ``nystrom_method`` which can be 'kmeans' or 'random'
Parameters
----------
nanchors: total number of anchors to determine; it is split between the two samples proportionally to their sizes (giving nxanchors and nyanchors).
nystrom_method: 'kmeans' or 'random'
"""
if verbose >0:
start = time()
print(f'Determining Nystrom anchors by {nystrom_method} ...',end=' ')
if verbose >1:
print(f"nanchors{nanchors} nystrom_method:{nystrom_method} split:{split_data} test_size:{test_size} on_other_data:{on_other_data}")
if on_other_data:
xmask_ny = self.xmask
ymask_ny = self.ymask
xratio,yratio = self.n1/(self.n1 + self.n2), self.n2/(self.n1 + self.n2)
self.nxanchors=np.int(np.floor(xratio * nanchors))
self.nyanchors=np.int(np.floor(yratio * nanchors))
if nystrom_method == 'kmeans':
# self.xanchors,self.xassignations = apt.kmeans.spherical_kmeans(self.x[self.xmask,:], nxanchors, max_iter)
# self.yanchors,self.yassignations = apt.kmeans.spherical_kmeans(self.y[self.ymask,:], nyanchors, max_iter)
self.xassignations,self.xanchors = kmeans(X=x_other, num_clusters=self.nxanchors, distance='euclidean', tqdm_flag=False) #cuda:0')
self.yassignations,self.yanchors = kmeans(X=y_other, num_clusters=self.nyanchors, distance='euclidean', tqdm_flag=False) #cuda:0')
self.xanchors = self.xanchors.double()
self.yanchors = self.yanchors.double()
elif nystrom_method == 'random':
self.xanchors = x_other[np.random.choice(x_other.shape[0], size=self.nxanchors, replace=False)]
self.yanchors = y_other[np.random.choice(y_other.shape[0], size=self.nyanchors, replace=False)]
else:
xratio,yratio = self.n1/(self.n1 + self.n2), self.n2/(self.n1 + self.n2)
self.nxanchors=np.int(np.floor(xratio * nanchors))
self.nyanchors=np.int(np.floor(yratio * nanchors))
if split_data:
#split data
# Say a = 1 - test_size
# We determine the nanchors = nxanchors + nyanchors on n1_ny = |_a*n1_| and n2_ny = |_a*n2_| data.
# To keep proportion we have nxanchors = |_ nanchors * n1/(n1+n2) _|and nyanchors = |_ nanchors * n2/(n1+n2) _| (strictly positive numbers)
# Thus, we need to have n1_ny >= nxanchors and n2_ny >= nyanchors
# We use a*n1 >= |_ a*n1 _| and find the condition a >= 1/n1 |_ nanchors* n1/(n1+n2) _| and a >= 1/n2 |_ nanchors* n2/(n1+n2) _|
# in order to implement a simple rule, we raise an error if these conditions are not fulfilled:
assert (1-test_size) >= 1/self.n1 * np.int(np.floor(nanchors * xratio)) and \
(1-test_size) >= 1/self.n2 * np.int(np.floor(nanchors * yratio))
assert self.nxanchors >0 and self.nyanchors >0
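# Worked example (illustrative numbers, not from the original code): with n1=300, n2=200,
# nanchors=50 and test_size=.8 we get nxanchors=floor(50*300/500)=30 and
# nyanchors=floor(50*200/500)=20; the conditions require 1-test_size=.2 >= 30/300=.1
# and .2 >= 20/200=.1, so the anchors fit inside the nystrom part of the split.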
# print(self.xmask.sum(),len(self.x_index))
xindex_nystrom,xindex_test = train_test_split(self.x_index[self.xmask],test_size=test_size)
yindex_nystrom,yindex_test = train_test_split(self.y_index[self.ymask],test_size=test_size)
xmask_ny = self.x_index.isin(xindex_nystrom)
ymask_ny = self.y_index.isin(yindex_nystrom)
self.xmask_test = self.x_index.isin(xindex_test)
self.ymask_test = self.y_index.isin(yindex_test)
self.n1_test = len(xindex_test)
self.n2_test = len(yindex_test)
else:
xmask_ny = self.xmask
ymask_ny = self.ymask
if nystrom_method == 'kmeans':
# self.xanchors,self.xassignations = apt.kmeans.spherical_kmeans(self.x[self.xmask,:], nxanchors, max_iter)
# self.yanchors,self.yassignations = apt.kmeans.spherical_kmeans(self.y[self.ymask,:], nyanchors, max_iter)
self.xassignations,self.xanchors = kmeans(X=self.x[xmask_ny,:], num_clusters=self.nxanchors, distance='euclidean', tqdm_flag=False) #cuda:0')
self.yassignations,self.yanchors = kmeans(X=self.y[ymask_ny,:], num_clusters=self.nyanchors, distance='euclidean', tqdm_flag=False) #cuda:0')
self.xanchors = self.xanchors.double()
self.yanchors = self.yanchors.double()
elif nystrom_method == 'random':
self.xanchors = self.x[xmask_ny,:][np.random.choice(self.x[xmask_ny,:].shape[0], size=self.nxanchors, replace=False)]
self.yanchors = self.y[ymask_ny,:][np.random.choice(self.y[ymask_ny,:].shape[0], size=self.nyanchors, replace=False)]
if verbose > 0:
print(time() - start)
def compute_nystrom_kmn(self,test_data=False):
"""
        Computes the (nxanchors+nyanchors) x (n1+n2) cross Gram matrix between the Nystrom anchors and the data
        (the (nxanchors+nyanchors) x (n1_test+n2_test) matrix if test_data is True).
"""
x,y = (self.x[self.xmask_test,:],self.y[self.ymask_test,:]) if test_data else (self.x[self.xmask,:],self.y[self.ymask,:])
z1,z2 = self.xanchors,self.yanchors
kernel = self.kernel
kz1x = kernel(z1,x)
kz2x = kernel(z2,x)
kz1y = kernel(z1,y)
kz2y = kernel(z2,y)
return(torch.cat((torch.cat((kz1x, kz1y), dim=1),
torch.cat((kz2x, kz2y), dim=1)), dim=0))
def compute_nystrom_kntestn(self):
"""
        Computes the (n1_test+n2_test) x (n1+n2) cross Gram matrix between the held-out test split and the data.
"""
x,y = (self.x[self.xmask,:],self.y[self.ymask,:])
z1,z2 = self.x[self.xmask_test,:],self.y[self.ymask_test,:]
kernel = self.kernel
kz1x = kernel(z1,x)
kz2x = kernel(z2,x)
kz1y = kernel(z1,y)
kz2y = kernel(z2,y)
return(torch.cat((torch.cat((kz1x, kz1y), dim=1),
torch.cat((kz2x, kz2y), dim=1)), dim=0))
def compute_gram_matrix(self,nystrom=False,test_data=False):
"""
        Computes the Gram matrix, on the anchors if nystrom is True, else on the data.
        This function is called every time the Gram matrix is needed; an option to cache it in memory
        could be added for kernel functions that are expensive to evaluate.
Returns
-------
torch.Tensor of size (nxanchors+nyanchors)**2 if nystrom else (n1+n2)**2
"""
x,y = (self.xanchors,self.yanchors) if nystrom else \
(self.x[self.xmask_test,:],self.y[self.ymask_test,:]) if test_data else \
(self.x[self.xmask,:],self.y[self.ymask,:])
kernel = self.kernel
kxx = kernel(x, x)
kyy = kernel(y, y)
kxy = kernel(x, y)
return(torch.cat((torch.cat((kxx, kxy), dim=1),
torch.cat((kxy.t(), kyy), dim=1)), dim=0))
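    # Reading aid for the block structure returned above:
    #     K = [[k(x,x), k(x,y)],
    #          [k(y,x), k(y,y)]]
    # Minimal usage sketch (assuming an instance `t` whose x, y, masks and kernel are already set):
    #     K = t.compute_gram_matrix()                  # (n1+n2) x (n1+n2)
    #     Kny = t.compute_gram_matrix(nystrom=True)    # (nxanchors+nyanchors)**2 entries, needs anchors first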
def compute_bicentering_matrix(self,nystrom=False,test_data=False):
"""
        Computes the bicentering matrix Pn (used to center the Gram matrix blockwise).
        Let I1,I2 be the identity matrices of size n1 and n2 (or nxanchors and nyanchors if nystrom),
        J1,J2 the square matrices full of ones of size n1 and n2 (or nxanchors and nyanchors if nystrom),
        and 012, 021 the matrices full of zeros of size n1 x n2 and n2 x n1 (or nxanchors x nyanchors and nyanchors x nxanchors if nystrom).
        Pn = [I1 - 1/n1 J1 ,      012      ]
             [     021     , I2 - 1/n2 J2 ]
Returns
-------
torch.Tensor of size (nxanchors+nyanchors)**2 if nystrom else (n1+n2)**2
"""
n1,n2 = (self.nxanchors,self.nyanchors) if nystrom else (self.n1_test,self.n2_test) if test_data else (self.n1,self.n2)
idn1 = torch.eye(n1, dtype=torch.float64)
idn2 = torch.eye(n2, dtype=torch.float64)
onen1 = torch.ones(n1, n1, dtype=torch.float64)
onen2 = torch.ones(n2, n2, dtype=torch.float64)
if nystrom in [4,5]:
A1 = 1/self.n1*torch.diag(torch.bincount(self.xassignations)).double()
A2 = 1/self.n2*torch.diag(torch.bincount(self.yassignations)).double()
pn1 = np.sqrt(self.n1/(self.n1+self.n2))*(idn1 - torch.matmul(A1,onen1))
pn2 = np.sqrt(self.n2/(self.n1+self.n2))*(idn2 - torch.matmul(A2,onen2))
else:
pn1 = idn1 - 1/n1 * onen1
pn2 = idn2 - 1/n2 * onen2
z12 = torch.zeros(n1, n2, dtype=torch.float64)
z21 = torch.zeros(n2, n1, dtype=torch.float64)
return(torch.cat((torch.cat((pn1, z12), dim=1), torch.cat(
(z21, pn2), dim=1)), dim=0)) # bloc diagonal
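    # Minimal verification sketch (assuming an instance `t`, default non-weighted case): each diagonal
    # block of Pn is a centering projector, so Pn applied to the constant vector is numerically zero:
    #     Pn = t.compute_bicentering_matrix()
    #     ones = torch.ones(t.n1 + t.n2, dtype=torch.float64)
    #     torch.allclose(torch.mv(Pn, ones), torch.zeros(t.n1 + t.n2, dtype=torch.float64))  # True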
def compute_pkm(self,nystrom=False,test_data=False):
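        """
        Computes the vector P K m used by the truncated statistics, where P is the bicentering matrix,
        K is the Gram (or Nystrom cross Gram) matrix and m holds the signed mean-difference weights
        (-1/n1 on the first sample and +1/n2 on the second, or -1/m1 and +1/m2 on the anchors for the
        Nystrom modes 2, 3 and 5). The integer nystrom flag (0 to 5) selects which anchor/data
        combination is used for P and K.
        """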
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n1,n2 = (self.n1_test,self.n2_test) if test_data else (self.n1,self.n2)
if nystrom in [0,3]:
Pbi = self.compute_bicentering_matrix(nystrom=False,test_data=test_data)
elif nystrom in [1,2]:
Pbi = self.compute_bicentering_matrix(nystrom=True,test_data=False)
elif nystrom in [4,5]:
Pbi = self.compute_bicentering_matrix(nystrom=nystrom,test_data=False)
if nystrom in [2,3,5]:
m1,m2 = (self.nxanchors,self.nyanchors)
m_mu1 = -1/m1 * torch.ones(m1, dtype=torch.float64) #, device=device)
m_mu2 = 1/m2 * torch.ones(m2, dtype=torch.float64) # , device=device)
elif nystrom in [0,1,4]:
m_mu1 = -1/n1 * torch.ones(n1, dtype=torch.float64) # , device=device)
m_mu2 = 1/n2 * torch.ones(n2, dtype=torch.float64) # , device=device)
m_mu12 = torch.cat((m_mu1, m_mu2), dim=0) #.to(device)
if nystrom in [4,5]:
A = torch.diag(torch.cat((1/n1*torch.bincount(self.xassignations),1/n2*torch.bincount(self.yassignations)))).double()
if nystrom ==0:
K = self.compute_gram_matrix(nystrom=False,test_data=test_data).to(device)
pk = torch.matmul(Pbi,K)
elif nystrom == 1:
kmn = self.compute_nystrom_kmn(test_data=test_data).to(device)
pk = torch.matmul(Pbi,kmn)
elif nystrom == 2:
kny = self.compute_gram_matrix(nystrom=nystrom).to(device)
pk = torch.matmul(Pbi,kny)
elif nystrom == 3:
kmn = self.compute_nystrom_kmn(test_data=test_data).to(device)
pk = torch.matmul(kmn,Pbi).T
elif nystrom == 4:
kmn = self.compute_nystrom_kmn(test_data=test_data).to(device)
pk = torch.chain_matmul(A**(1/2),Pbi.T,kmn)
elif nystrom == 5:
kny = self.compute_gram_matrix(nystrom=nystrom).to(device)
pk = torch.chain_matmul(A**(1/2),Pbi.T,kny)
# pk = torch.chain_matmul(Pbi,A,kny,A)
return(torch.mv(pk,m_mu12))
def diagonalize_bicentered_gram(self,nystrom=False,test_data=False,verbose=0):
"""
        Diagonalizes the bicentered Gram matrix which shares its spectrum with the within-group covariance operator in the RKHS.
Stores eigenvalues (sp or spny) and eigenvectors (ev or evny) as attributes
"""
if verbose >0:
start = time()
ny = ' nystrom' if nystrom else ''
print(f'Diagonalizing the{ny} Gram matrix ...',end=' ')
if verbose >1:
print(f'nystrom:{nystrom} test_data:{test_data}')
n1,n2 = (self.n1_test,self.n2_test) if test_data else (self.n1,self.n2)
if nystrom:
m1,m2 = (self.nxanchors,self.nyanchors)
# if nystrom in [1,2,3] else \
# (self.n1_test,self.n2_test) if test_data else \
# (self.n1,self.n2) # nystrom = False or nystrom in [4,5]
pn = self.compute_bicentering_matrix(nystrom=nystrom,test_data=test_data).double()
if nystrom in [4,5]:
A = torch.diag(torch.cat((1/n1*torch.bincount(self.xassignations),1/n2*torch.bincount(self.yassignations)))).double()
sp,ev = eigsy(torch.chain_matmul(A**(1/2),pn, self.compute_gram_matrix(nystrom=nystrom,test_data=test_data),pn,A**(1/2)).cpu().numpy())
elif nystrom in [1,2]:
sp,ev = eigsy(1/(m1+m2) * torch.chain_matmul(pn, self.compute_gram_matrix(nystrom=nystrom,test_data=test_data), pn).cpu().numpy()) # eigsy uses numpy
else:
sp,ev = eigsy(1/(n1+n2) * torch.chain_matmul(pn, self.compute_gram_matrix(nystrom=nystrom,test_data=test_data), pn).cpu().numpy()) # eigsy uses numpy
order = sp.argsort()[::-1]
        if nystrom: # the distinction is useful to compute the metrics on the nystrom version, but only the latest nystrom diagonalization is kept in memory
self.evny = torch.tensor(ev.T[order],dtype=torch.float64)
self.spny = torch.tensor(sp[order], dtype=torch.float64)
elif test_data:
self.ev_test = torch.tensor(ev.T[order],dtype=torch.float64)
self.sp_test = torch.tensor(sp[order], dtype=torch.float64)
else:
self.ev = torch.tensor(ev.T[order],dtype=torch.float64)
self.sp = torch.tensor(sp[order], dtype=torch.float64)
if verbose > 0:
print(time() - start)
# def compute_kfdat(self,trunc=None,nystrom=False,name=None,verbose=0):
# """
# Computes the kfda truncated statistic of [Harchaoui 2009].
# Two ways of using Nystrom:
# nystrom = False or 0 -> Statistic computed without nystrom
# nystrom = True or 1 -> Statistic computed using nystrom for the diagonalized bicentered gram matrix (~Sigma_W) and not for mn (\mu_2 - \mu_1)
# nystrom = 2 -> Statistic computed using nystrom for the diagonalized bicentered gram matrix (~Sigma_W) and for mn (\mu_2 - \mu_1)
# Depending on the situation, the coeficient of the statistic changes.
# Stores the result as a column in the dataframe df_kfdat
# """
# if verbose >0:
# start = time()
# ny = ' nystrom' if nystrom else ''
# print(f'Computing the{ny} kfdat statistic ...',end=' ')
# n1,n2 = (self.nxanchors,self.nyanchors) if nystrom else (self.n1,self.n2)
# ntot = self.n1 + self.n2 # tot nobs in data
# mtot = n1 + n2 # either tot nobs in data or tot nanchors
# if trunc is None:
# trunc = np.arange(1,mtot+1)
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# sp,ev = (self.spny.to(device),self.evny.to(device)) if nystrom else (self.sp.to(device),self.ev.to(device))
# pn = self.compute_bicentering_matrix(nystrom).to(device)
# mn1 = -1/self.n1 * torch.ones(self.n1, dtype=torch.float64, device=device) if nystrom < 2 else -1/n1 * torch.ones(n1, dtype=torch.float64, device=device)
# mn2 = 1/self.n2 * torch.ones(self.n2, dtype=torch.float64, device=device) if nystrom <2 else 1/n2 * torch.ones(n2, dtype=torch.float64, device=device)
# mn = torch.cat((mn1, mn2), dim=0).to(device)
# gram = self.compute_nystrom_kmn().to(device) if nystrom==1 else self.compute_gram_matrix(nystrom=nystrom).to(device)
# pk = torch.matmul(pn,gram)
# pkm = torch.mv(pk,mn)
# kfda = 0
# kfda_dict = []
# if trunc[-1] >mtot:
# trunc=trunc[:mtot]
# for i,t in enumerate(trunc):
# if t <= mtot:
# evi = ev[i]
# # kfda += (n1*n2)/(ntot * mtot *sp[i]**2)* torch.dot(evi,pkm)**2 if nystrom <2 else (n1*n2)/(mtot * mtot *sp[i]**2)* torch.dot(evi,pkm)**2
# kfda += (self.n1*self.n2)/(mtot * ntot *sp[i]**2)* torch.dot(evi,pkm)**2 if nystrom <2 else \
# (n1*n2)/(mtot * mtot *sp[i]**2)* torch.dot(evi,pkm)**2
# kfda_dict += [kfda.item()] # [f'O_{t}']
# name = name if name is not None else self.name_generator(trunc,nystrom)
# self.df_kfdat[name] = pd.Series(kfda_dict,index=trunc)
# if verbose > 0:
# print(time() - start)
def compute_kfdat(self,trunc=None,nystrom=False,test_data=False,name=None,verbose=0):
"""
Computes the kfda truncated statistic of [Harchaoui 2009].
Two ways of using Nystrom:
nystrom = False or 0 -> Statistic computed without nystrom
nystrom = True or 1 -> Statistic computed using nystrom for the diagonalized bicentered gram matrix (~Sigma_W) and not for mn (\mu_2 - \mu_1)
nystrom = 2 -> Statistic computed using nystrom for the diagonalized bicentered gram matrix (~Sigma_W) and for mn (\mu_2 - \mu_1)
        (integer values 3 to 5 select the additional Nystrom variants handled in compute_pkm)
        Depending on the situation, the coefficient of the statistic changes.
Stores the result as a column in the dataframe df_kfdat
"""
if verbose >0:
start = time()
ny = ' nystrom' if nystrom else ''
print(f'Computing the{ny} kfdat statistic ...',end=' ')
n1,n2 = (self.n1_test,self.n2_test) if test_data else (self.n1,self.n2)
ntot = n1+n2
if nystrom >=1:
m1,m2 = (self.nxanchors,self.nyanchors)
mtot = m1+m2
maxtrunc = ntot if nystrom ==0 else mtot
if trunc is None:
trunc = np.arange(1,ntot+1) if nystrom==False else np.arange(1,mtot+1)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if nystrom in [0,3]:
sp,ev = (self.sp.to(device),self.ev.to(device))
else:
sp,ev = (self.spny.to(device),self.evny.to(device))
pkm = self.compute_pkm(nystrom=nystrom,test_data=test_data)
if trunc[-1] >maxtrunc:
trunc=trunc[:maxtrunc]
t=trunc[-1]
kfda = ((n1*n2)/(ntot*ntot*sp[:t]**2)*torch.mv(ev[:t],pkm)**2).cumsum(axis=0).numpy() if nystrom ==0 else \
((n1*n2)/(ntot*mtot*sp[:t]**2)*torch.mv(ev[:t],pkm)**2).cumsum(axis=0).numpy() if nystrom ==1 else \
((m1*m2)/(mtot*mtot*sp[:t]**2)*torch.mv(ev[:t],pkm)**2).cumsum(axis=0).numpy() if nystrom ==2 else \
((m1*m2)/(mtot*ntot*sp[:t]**2)*torch.mv(ev[:t],pkm)**2).cumsum(axis=0).numpy() if nystrom ==3 else \
((n1*n2)/(ntot*sp[:t]**2)*torch.mv(ev[:t],pkm)**2).cumsum(axis=0).numpy() if nystrom ==4 else \
((m1*m2)/(mtot*sp[:t]**2)*torch.mv(ev[:t],pkm)**2).cumsum(axis=0).numpy()
name = name if name is not None else self.name_generator(trunc,nystrom)
self.df_kfdat[name] = pd.Series(kfda,index=trunc)
if verbose > 0:
print(time() - start)
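    # Minimal end-to-end sketch (assuming an instance `t` built on the two samples; attribute names as above):
    #     t.diagonalize_bicentered_gram(nystrom=False)          # fills t.sp and t.ev
    #     t.compute_kfdat(trunc=np.arange(1, 21), name='standard')
    #     t.df_kfdat['standard']                                # truncated statistics indexed by truncation
    # With Nystrom, compute the anchors first (compute_nystrom_anchors), diagonalize with the nystrom flag
    # set, then pass the corresponding integer nystrom value here.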
def compute_proj_kfda(self,trunc = None,nystrom=False,test_data=False,name=None,verbose=0):
        # TODO: add nystrom to m and to the 'sample' column
if verbose >0:
start = time()
ny = ' nystrom' if nystrom else ''
print(f'Computing{ny} proj on kernel Fisher discriminant axis ...',end=' ')
n1,n2 = (self.n1_test,self.n2_test) if test_data else (self.n1,self.n2)
ntot = n1+n2
if nystrom >=1:
m1,m2 = (self.nxanchors,self.nyanchors)
mtot = m1+m2
maxtrunc = ntot if nystrom ==0 else mtot
if trunc is None:
trunc = np.arange(1,ntot+1) if nystrom==False else np.arange(1,mtot+1)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if nystrom in [0,3]:
Pbi = self.compute_bicentering_matrix(nystrom=False,test_data=test_data)
sp,ev = (self.sp.to(device),self.ev.to(device))
if test_data:
kntestn = self.compute_nystrom_kntestn()
pk2 = torch.matmul(Pbi,kntestn)
else:
Pbi = self.compute_bicentering_matrix(nystrom=True,test_data=False)
sp,ev = (self.spny.to(device),self.evny.to(device))
if test_data:
kmn = self.compute_nystrom_kmn(test_data=False).to(device)
pk2 = torch.matmul(Pbi,kmn)
if nystrom ==0:
K = self.compute_gram_matrix(nystrom=False,test_data=test_data).to(device)
pk2 = torch.matmul(Pbi,K)
elif nystrom == 1:
kmn = self.compute_nystrom_kmn(test_data=test_data).to(device)
pk2 = torch.matmul(Pbi,kmn)
elif nystrom == 2:
kny = self.compute_gram_matrix(nystrom=nystrom).to(device)
pk2 = torch.matmul(Pbi,kny)
else:
kmn = self.compute_nystrom_kmn(test_data=test_data).to(device)
pk2 = torch.matmul(kmn,Pbi).T
pkm = self.compute_pkm(nystrom=nystrom,test_data=test_data)
t=trunc[-1]
proj = (ntot**-1*sp[:t]**(-3/2)*torch.mv(ev[:t],pkm)*torch.matmul(ev[:t],pk2).T).cumsum(axis=1).numpy() if nystrom ==0 else \
(mtot**-1*sp[:t]**(-3/2)*torch.mv(ev[:t],pkm)*torch.matmul(ev[:t],pk2).T).cumsum(axis=0).numpy() if nystrom ==1 else \
(mtot**-1*sp[:t]**(-3/2)*torch.mv(ev[:t],pkm)*torch.matmul(ev[:t],pk2).T).cumsum(axis=0).numpy() if nystrom ==2 else \
(ntot**-1*sp[:t]**(-3/2)*torch.mv(ev[:t],pkm)*torch.matmul(ev[:t],pk2).T).cumsum(axis=0).numpy()
name = name if name is not None else self.name_generator(trunc,nystrom)
self.df_proj_kfda[name] = pd.DataFrame(proj,index= self.index[self.imask],columns=[str(t) for t in trunc])
self.df_proj_kfda[name]['sample'] = ['x']*n1 + ['y']*n2
if verbose > 0:
print(time() - start)
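    # Shape of the output: after compute_proj_kfda(name='standard'), self.df_proj_kfda['standard'] is a
    # DataFrame indexed by the observations, with one column per truncation level plus a 'sample' column
    # ('x' for the first group, 'y' for the second).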
    # leftovers from previous versions
# n1,n2 = (self.nxanchors,self.nyanchors) if nystrom else (self.n1,self.n2)
# ntot = self.n1 + self.n2
# mtot = n1 + n2
# if trunc is None:
# trunc = np.arange(1,ntot+1)
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# sp,ev = (self.spny.to(device),self.evny.to(device)) if nystrom else (self.sp.to(device),self.ev.to(device))
# pn = self.compute_bicentering_matrix(nystrom).to(device)
    # # m is built on all the observations, not on the anchors, even with Nystrom
# mn1 = -1/n1 * torch.ones(n1, dtype=torch.float64, device=device)
# mn2 = 1/n2 * torch.ones(n2, dtype=torch.float64, device=device)
# mn = torch.cat((mn1, mn2), dim=0).to(device)
# pk = torch.matmul(pn,gram)
# pkm = torch.mv(pk,mn)
# coefs = []
# print(mtot)
# for i,t in enumerate(trunc):
# if t<=mtot:
# evi = ev[i]
# coefs += [1/(mtot *sp[i]**(3/2))* torch.dot(evi,pkm).item()]
# lvpkm =
# print(mn.shape)
# coefs = torch.tensor(coefs)
# cev=(ev[:trunc[-1]].t()*coefs).t()
# cevpk = torch.matmul(cev,pk)
# cevpk= cevpk.cumsum(dim=0)
# print(cevpk[:2,:2])
def compute_proj_kpca(self,trunc=None,nystrom=False,test_data=False,name=None,verbose=0):
if verbose >0:
start = time()
ny = ' nystrom' if nystrom else ''
            print(f'Computing{ny} proj on kernel principal component axis ...',end=' ')
n1,n2 = (self.n1_test,self.n2_test) if test_data else (self.n1,self.n2)
ntot = n1+n2
if nystrom >=1:
m1,m2 = (self.nxanchors,self.nyanchors)
mtot = m1+m2
maxtrunc = ntot if nystrom ==0 else mtot
if trunc is None:
trunc = np.arange(1,ntot+1) if nystrom==False else np.arange(1,mtot+1)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if nystrom in [0,3]:
Pbi = self.compute_bicentering_matrix(nystrom=False,test_data=test_data)
sp,ev = (self.sp.to(device),self.ev.to(device))
else:
Pbi = self.compute_bicentering_matrix(nystrom=True,test_data=False)
sp,ev = (self.spny.to(device),self.evny.to(device))
        K = self.compute_nystrom_kmn() if nystrom in [1,2] else \
self.compute_nystrom_kntestn() if nystrom ==3 and test_data else \
self.compute_gram_matrix()
t = trunc[-1]
proj = ( ntot**(-1/2)*sp[:t]**(-1/2)*torch.chain_matmul(ev[:t],Pbi,K).T).numpy()
name = name if name is not None else self.name_generator(trunc,nystrom)
self.df_proj_kpca[name] = pd.DataFrame(proj,index=self.index[self.imask],columns=[str(t) for t in trunc])
self.df_proj_kpca[name]['sample'] = ['x']*self.n1 + ['y']*self.n2
if verbose > 0:
print(time() - start)
    def compute_corr_proj_var(self,trunc=None,nystrom=False,which='proj_kfda',name=None,prefix_col='',verbose=0):
if verbose >0:
start = time()
ny = ' nystrom' if nystrom else ''
print(f'Computing the{ny} corr matrix between projections and variables ...',end=' ')
self.prefix_col=prefix_col
df_proj= self.init_df_proj(which,name)
if trunc is None:
            trunc = range(1,df_proj.shape[1] - 1) # -1 for the 'sample' column
df_array = pd.DataFrame(torch.cat((self.x[self.xmask,:],self.y[self.ymask,:]),dim=0).numpy(),index=self.index[self.imask],columns=self.variables)
for t in trunc:
df_array[f'{prefix_col}{t}'] = pd.Series(df_proj[f'{t}'])
name = name if name is not None else self.name_generator(trunc,nystrom)
self.corr[name] = df_array.corr().loc[self.variables,[f'{prefix_col}{t}' for t in trunc]]
if verbose > 0:
print(time() - start)
def compute_mmd(self,unbiaised=False,nystrom=False,test_data=False,name='',verbose=0):
if verbose >0:
start = time()
ny = ' nystrom' if nystrom else ''
            print(f'Computing the{ny} mmd statistic ...',end=' ')
n1,n2 = (self.n1_test,self.n2_test) if test_data else (self.n1,self.n2)
ntot = n1+n2
if nystrom >=1:
m1,m2 = (self.nxanchors,self.nyanchors)
mtot = m1+m2
npoints = mtot if nystrom else ntot
if nystrom:
m1,m2 = (self.nxanchors,self.nyanchors)
m_mu1 = -1/m1 * torch.ones(m1, dtype=torch.float64) #, device=device)
m_mu2 = 1/m2 * torch.ones(m2, dtype=torch.float64) # , device=device)
else:
m_mu1 = -1/n1 * torch.ones(n1, dtype=torch.float64) # , device=device)
m_mu2 = 1/n2 * torch.ones(n2, dtype=torch.float64) # , device=device)
m_mu12 = torch.cat((m_mu1, m_mu2), dim=0) #.to(device)
K = self.compute_gram_matrix(nystrom=nystrom,test_data=test_data)
if name is None:
name=''
self.dict_mmd['B'+name] = torch.dot(torch.mv(K,m_mu12),m_mu12)
if unbiaised:
            mask = torch.eye(npoints,npoints).bool()
K.masked_fill_(mask, 0)
self.dict_mmd['U'+name] = torch.dot(torch.mv(K,m_mu12),m_mu12)
if verbose > 0:
print(time() - start)
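    # Note: with m = (-1/n1, ..., -1/n1, 1/n2, ..., 1/n2), the quadratic form m^T K m computed above is
    # the biased (V-statistic) estimate of MMD^2:
    #     MMD_b^2 = 1/n1^2 * sum_ij k(x_i, x_j) + 1/n2^2 * sum_ij k(y_i, y_j) - 2/(n1*n2) * sum_ij k(x_i, y_j)
    # The unbiaised branch only zeroes the diagonal of K (the k(z_i, z_i) self-terms) before reusing the
    # same quadratic form.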
def name_generator(self,trunc=None,nystrom=0,nystrom_method='kmeans',nanchors=None,
split_data=False,test_size=.8,obs_to_ignore=None):
if obs_to_ignore is not None:
name_ = f'~{obs_to_ignore[0]} n={len(obs_to_ignore)}'
else:
name_ = ""
if trunc is not None:
name_ += f"tmax{trunc[-1]}"
if nystrom:
name_ +=f'ny{nystrom}{nystrom_method}na{nanchors}'
if split_data:
name_ +=f'split{test_size}'
return(name_)
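    # Example (with the defaults): name_generator(trunc=np.arange(1, 31), nystrom=1, nanchors=50)
    # returns 'tmax30ny1kmeansna50'; adding split_data=True appends 'split0.8'.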
def kfdat(self,trunc=None,nystrom=False,nanchors=None,nystrom_method='kmeans',split_data=False,test_size=.8,name=None,main=False,obs_to_ignore=None,save=False,path=None,verbose=0):
which_dict={'kfdat':path if save else ''}
self.test(trunc=trunc,which_dict=which_dict,nystrom=nystrom,nanchors=nanchors,nystrom_method=nystrom_method,split_data=split_data,test_size=test_size,name=name,main=main,
obs_to_ignore=obs_to_ignore,save=save,verbose=verbose)
def proj_kfda(self,trunc=None,nystrom=False,nanchors=None,nystrom_method='kmeans',split_data=False,test_size=.8,name=None,main=False,obs_to_ignore=None,save=False,path=None,verbose=0):
which_dict={'proj_kfda':path if save else ''}
self.test(trunc=trunc,which_dict=which_dict,nystrom=nystrom,nanchors=nanchors,nystrom_method=nystrom_method,split_data=split_data,test_size=test_size,name=name,main=main,
obs_to_ignore=obs_to_ignore,save=save,verbose=verbose)
def proj_kpca(self,trunc=None,nystrom=False,nanchors=None,nystrom_method='kmeans',split_data=False,test_size=.8,name=None,main=False,obs_to_ignore=None,save=False,path=None,verbose=0):
which_dict={'proj_kpca':path if save else ''}
self.test(trunc=trunc,which_dict=which_dict,nystrom=nystrom,nanchors=nanchors,nystrom_method=nystrom_method,split_data=split_data,test_size=test_size,name=name,main=main,
obs_to_ignore=obs_to_ignore,save=save,verbose=verbose)
def correlations(self,trunc=None,nystrom=False,nanchors=None,nystrom_method='kmeans',split_data=False,test_size=.8,name=None,main=False,obs_to_ignore=None,save=False,path=None,verbose=0):
which_dict={'corr':path if save else ''}
self.test(trunc=trunc,which_dict=which_dict,nystrom=nystrom,nanchors=nanchors,nystrom_method=nystrom_method,split_data=split_data,test_size=test_size,name=name,main=main,
obs_to_ignore=obs_to_ignore,save=save,verbose=verbose,corr_which='proj_kfda',corr_prefix_col='')
def mmd(self,unbiaised=True,nystrom=False,nanchors=None,nystrom_method='kmeans',split_data=False,test_size=.8,name=None,main=False,obs_to_ignore=None,save=False,path=None,verbose=0):
which_dict={'mmd':path if save else ''}
self.test(which_dict=which_dict,nystrom=nystrom,nanchors=nanchors,nystrom_method=nystrom_method,split_data=split_data,test_size=test_size,name=name,main=main,
obs_to_ignore=obs_to_ignore,mmd_unbiaised=unbiaised,save=save,verbose=verbose)
    def test(self,trunc=None,which_dict={'kfdat':'','proj_kfda':'','proj_kpca':'','corr':'','mmd':''},
nystrom=False,nanchors=None,nystrom_method='kmeans',split_data=False,test_size=.8,
name=None,main=False,corr_which='proj_kfda',corr_prefix_col='',obs_to_ignore=None,mmd_unbiaised=False,save=False,verbose=0):
# for output,path in which.items()
name_ = "main" if not hasattr(self,'main_name') and name is None else \
self.name_generator(trunc=trunc,nystrom=nystrom,nystrom_method=nystrom_method,nanchors=nanchors,
split_data=split_data,test_size=test_size,obs_to_ignore=obs_to_ignore) if name is None else \
name
if main or not hasattr(self,'main_name'):
self.main_name = name_
if verbose >0:
none = 'None'
datastr = f'n1:{self.n1} n2:{self.n2} trunc:{none if trunc is None else len(trunc)}'
datastr += f'\nname:{name}\n'
inwhich = ' and '.join(which_dict.keys()) if len(which_dict)>1 else list(which_dict.keys())[0]
ny=''
if nystrom:
ny += f' nystrom:{nystrom} {nystrom_method} nanchors={nanchors}'
if split_data:
ny+=f' split{test_size}'
print(f'{datastr}Compute {inwhich} {ny}') # of {self.n1} and {self.n2} points{ny} ')
if verbose >1:
print(f"trunc:{len(trunc)} \n which:{which_dict} nystrom:{nystrom} nanchors:{nanchors} nystrom_method:{nystrom_method} split:{split_data} test_size:{test_size}\n")
print(f"main:{main} corr:{corr_which} mmd_unbiaised:{mmd_unbiaised} seva:{save}")
loaded = []
if save:
if 'kfdat' in which_dict and os.path.isfile(which_dict['kfdat']):
loaded_kfdat = pd.read_csv(which_dict['kfdat'],header=0,index_col=0)
if len(loaded_kfdat.columns)==1 and name is not None:
c= loaded_kfdat.columns[0]
self.df_kfdat[name] = loaded_kfdat[c]
else:
for c in loaded_kfdat.columns:
if c not in self.df_kfdat.columns:
self.df_kfdat[c] = loaded_kfdat[c]
loaded += ['kfdat']
if 'proj_kfda' in which_dict and os.path.isfile(which_dict['proj_kfda']):
self.df_proj_kfda[name_] = pd.read_csv(which_dict['proj_kfda'],header=0,index_col=0)
loaded += ['proj_kfda']
if 'proj_kpca' in which_dict and os.path.isfile(which_dict['proj_kpca']):
self.df_proj_kpca[name_] = pd.read_csv(which_dict['proj_kpca'],header=0,index_col=0)
loaded += ['proj_kpca']
if 'corr' in which_dict and os.path.isfile(which_dict['corr']):
self.corr[name_] = | pd.read_csv(which_dict['corr'],header=0,index_col=0) | pandas.read_csv |
import os
import sys
import pytest
from shapely.geometry import Polygon, GeometryCollection
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from tests.fixtures import *
from tests.test_core_components_route import self_looping_route, route
from tests.test_core_components_service import service
from genet.inputs_handler import matsim_reader, gtfs_reader
from genet.inputs_handler import read
from genet.schedule_elements import Schedule, Service, Route, Stop, read_vehicle_types
from genet.utils import plot, spatial
from genet.validate import schedule_validation
from genet.exceptions import ServiceIndexError, RouteIndexError, StopIndexError, UndefinedCoordinateSystemError, \
ConflictingStopData, InconsistentVehicleModeError
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture()
def schedule():
route_1 = Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='2',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
@pytest.fixture()
def strongly_connected_schedule():
route_1 = Route(route_short_name='name',
mode='bus',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='3', x=3, y=3, epsg='epsg:27700', name='Stop_3'),
Stop(id='4', x=7, y=5, epsg='epsg:27700', name='Stop_4'),
Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['1', '2'], departure_offsets=['1', '2'],
id='1')
route_2 = Route(route_short_name='name_2',
mode='bus',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='7', x=3, y=3, epsg='epsg:27700', name='Stop_7'),
Stop(id='8', x=7, y=5, epsg='epsg:27700', name='Stop_8'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['1', '2', '3', '4', '5'],
departure_offsets=['1', '2', '3', '4', '5'],
id='2')
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
def test_initiating_schedule(schedule):
s = schedule
assert_semantically_equal(dict(s._graph.nodes(data=True)), {
'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'6': {'services': {'service'}, 'routes': {'2'}, 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()}})
assert_semantically_equal(s._graph.edges(data=True)._adjdict,
{'5': {'6': {'services': {'service'}, 'routes': {'2'}}},
'6': {'7': {'services': {'service'}, 'routes': {'2'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}}, '8': {}, '4': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}})
log = s._graph.graph.pop('change_log')
assert log.empty
assert_semantically_equal(s._graph.graph,
{'name': 'Schedule graph',
'routes': {'2': {'route_short_name': 'name_2', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '2', 'route': [],
'await_departure': [],
'ordered_stops': ['5', '6', '7', '8']},
'1': {'route_short_name': 'name', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '1', 'route': [],
'await_departure': [],
'ordered_stops': ['1', '2', '3', '4']}},
'services': {'service': {'id': 'service', 'name': 'name'}},
'route_to_service_map': {'1': 'service', '2': 'service'},
'service_to_route_map': {'service': ['1', '2']},
'crs': {'init': 'epsg:27700'}})
def test_initiating_schedule_with_non_uniquely_indexed_objects():
route_1 = Route(route_short_name='name',
mode='bus', id='',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_2_bus', 'veh_3_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service1 = Service(id='service', routes=[route_1, route_2])
service2 = Service(id='service', routes=[route_1, route_2])
s = Schedule(epsg='epsg:27700', services=[service1, service2])
assert s.number_of_routes() == 4
assert len(s) == 2
def test__getitem__returns_a_service(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert schedule['service'] == services[0]
def test_accessing_route(schedule):
assert schedule.route('1') == Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'),
Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'),
Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['1', '2'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
def test__repr__shows_number_of_services(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
schedule = Schedule('epsg:27700')
s = schedule.__repr__()
assert 'instance at' in s
assert 'services' in s
Schedule.__len__.assert_called()
def test__str__shows_info():
schedule = Schedule('epsg:27700')
assert 'Number of services' in schedule.__str__()
assert 'Number of routes' in schedule.__str__()
def test__len__returns_the_number_of_services(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert len(schedule) == 1
def test_print_shows_info(mocker):
mocker.patch.object(Schedule, 'info')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.info.assert_called_once()
def test_info_shows_number_of_services_and_routes(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
mocker.patch.object(Schedule, 'number_of_routes')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.__len__.assert_called()
Schedule.number_of_routes.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker, schedule):
mocker.patch.object(plot, 'plot_graph')
schedule.plot()
plot.plot_graph.assert_called_once()
def test_reproject_changes_projection_for_all_stops_in_route():
correct_x_y = {'x': -0.14967658860132668, 'y': 51.52393050617373}
schedule = Schedule(
'epsg:27700',
[Service(id='10314', routes=[
Route(
route_short_name='12',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_1_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])])
schedule.reproject('epsg:4326')
_stops = list(schedule.stops())
stops = dict(zip([stop.id for stop in _stops], _stops))
assert_semantically_equal({'x': stops['26997928P'].x, 'y': stops['26997928P'].y}, correct_x_y)
assert_semantically_equal({'x': stops['26997928P.link:1'].x, 'y': stops['26997928P.link:1'].y}, correct_x_y)
def test_adding_merges_separable_schedules(route):
schedule = Schedule(epsg='epsg:4326', services=[Service(id='1', routes=[route])])
before_graph_nodes = schedule.reference_nodes()
before_graph_edges = schedule.reference_edges()
a = Stop(id='10', x=40, y=20, epsg='epsg:27700', linkRefId='1')
b = Stop(id='20', x=10, y=20, epsg='epsg:27700', linkRefId='2')
c = Stop(id='30', x=30, y=30, epsg='epsg:27700', linkRefId='3')
d = Stop(id='40', x=70, y=50, epsg='epsg:27700', linkRefId='4')
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[Service(id='2', routes=[
Route(
route_short_name='name',
mode='bus',
stops=[a, b, c, d],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['04:40:00', '05:40:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'],
route=['1', '2', '3', '4'], id='2')
])])
tba_graph_nodes = schedule_to_be_added.reference_nodes()
tba_graph_edges = schedule_to_be_added.reference_edges()
schedule.add(schedule_to_be_added)
assert '1' in list(schedule.service_ids())
assert '2' in list(schedule.service_ids())
assert '1' in list(schedule.route_ids())
assert '2' in list(schedule.route_ids())
assert schedule.epsg == 'epsg:4326'
assert schedule.epsg == schedule_to_be_added.epsg
assert set(schedule._graph.nodes()) == set(before_graph_nodes) | set(tba_graph_nodes)
assert set(schedule._graph.edges()) == set(before_graph_edges) | set(tba_graph_edges)
def test_adding_throws_error_when_schedules_not_separable(test_service):
schedule = Schedule(epsg='epsg:4326', services=[test_service])
assert 'service' in schedule
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[test_service])
with pytest.raises(NotImplementedError) as e:
schedule.add(schedule_to_be_added)
assert 'This method only supports adding non overlapping services' in str(e.value)
def test_adding_calls_on_reproject_when_schedules_dont_have_matching_epsg(test_service, different_test_service, mocker):
mocker.patch.object(Schedule, 'reproject')
schedule = Schedule(services=[test_service], epsg='epsg:27700')
assert schedule.has_service('service')
schedule_to_be_added = Schedule(services=[different_test_service], epsg='epsg:4326')
schedule.add(schedule_to_be_added)
schedule_to_be_added.reproject.assert_called_once_with('epsg:27700')
def test_service_ids_returns_keys_of_the_services_dict(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert set(schedule.service_ids()) == {'service'}
def test_routes_returns_service_ids_with_unique_routes(route, similar_non_exact_test_route):
services = [Service(id='1', routes=[route]), Service(id='2', routes=[similar_non_exact_test_route])]
schedule = Schedule(services=services, epsg='epsg:4326')
routes = list(schedule.routes())
assert route in routes
assert similar_non_exact_test_route in routes
assert len(routes) == 2
def test_number_of_routes_counts_routes(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4362')
assert schedule.number_of_routes() == 3
def test_service_attribute_data_under_key(schedule):
df = schedule.service_attribute_data(keys='name').sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}}
))
def test_service_attribute_data_under_keys(schedule):
df = schedule.service_attribute_data(keys=['name', 'id']).sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}, 'id': {'service': 'service'}}
))
def test_route_attribute_data_under_key(schedule):
df = schedule.route_attribute_data(keys='route_short_name').sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}}
))
def test_route_attribute_data_under_keys(schedule):
df = schedule.route_attribute_data(keys=['route_short_name', 'mode']).sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}, 'mode': {'1': 'bus', '2': 'bus'}}
))
def test_stop_attribute_data_under_key(schedule):
df = schedule.stop_attribute_data(keys='x').sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0}}))
def test_stop_attribute_data_under_keys(schedule):
df = schedule.stop_attribute_data(keys=['x', 'y']).sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0},
'y': {'1': 2.0, '2': 2.0, '3': 3.0, '4': 5.0, '5': 2.0, '6': 2.0, '7': 3.0, '8': 5.0}}))
def test_extracting_services_on_condition(schedule):
ids = schedule.extract_service_ids_on_attributes(conditions={'name': 'name'})
assert ids == ['service']
def test_extracting_routes_on_condition(schedule):
ids = schedule.extract_route_ids_on_attributes(conditions=[{'mode': 'bus'}, {'route_short_name': 'name_2'}],
how=all)
assert ids == ['2']
def test_extracting_stops_on_condition(schedule):
ids = schedule.extract_stop_ids_on_attributes(conditions=[{'x': (0, 4)}, {'y': (0, 2)}], how=all)
assert set(ids) == {'5', '6', '1', '2'}
def test_getting_services_on_modal_condition(schedule):
service_ids = schedule.services_on_modal_condition(modes='bus')
assert service_ids == ['service']
def test_getting_routes_on_modal_condition(schedule):
route_ids = schedule.routes_on_modal_condition(modes='bus')
assert set(route_ids) == {'1', '2'}
def test_getting_stops_on_modal_condition(schedule):
stop_ids = schedule.stops_on_modal_condition(modes='bus')
assert set(stop_ids) == {'5', '6', '7', '8', '3', '1', '4', '2'}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_getting_stops_on_spatial_condition_with_geojson(schedule, mocker):
mocker.patch.object(spatial, 'read_geojson_to_shapely',
return_value=GeometryCollection(
[Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])]))
stops = schedule.stops_on_spatial_condition(test_geojson)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_shapely_polygon(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
stops = schedule.stops_on_spatial_condition(p)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_s2_hex_region(schedule):
s2_region = '4837,4839,483f5,4844,4849'
stops = schedule.stops_on_spatial_condition(s2_region)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_routes_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p)
assert set(routes) == {'1', '2'}
def test_getting_routes_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p, how='within')
assert set(routes) == {'1', '2'}
def test_getting_services_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p)
assert set(routes) == {'service'}
def test_getting_services_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p, how='within')
assert set(routes) == {'service'}
def test_applying_attributes_to_service(schedule):
assert schedule._graph.graph['services']['service']['name'] == 'name'
assert schedule['service'].name == 'name'
schedule.apply_attributes_to_services({'service': {'name': 'new_name'}})
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_attributes_changing_id_to_service_throws_error(schedule):
assert 'service' in schedule._graph.graph['services']
assert schedule._graph.graph['services']['service']['id'] == 'service'
assert schedule['service'].id == 'service'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_services({'service': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_route(schedule):
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'name'
assert schedule.route('1').route_short_name == 'name'
schedule.apply_attributes_to_routes({'1': {'route_short_name': 'new_name'}})
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'new_name'
assert schedule.route('1').route_short_name == 'new_name'
def test_applying_mode_attributes_to_route_results_in_correct_mode_methods(schedule):
assert schedule.route('1').mode == 'bus'
assert schedule.modes() == {'bus'}
assert schedule.mode_graph_map() == {
'bus': {('3', '4'), ('2', '3'), ('1', '2'), ('6', '7'), ('5', '6'), ('7', '8')}}
schedule.apply_attributes_to_routes({'1': {'mode': 'new_bus'}})
assert schedule.route('1').mode == 'new_bus'
assert schedule.modes() == {'bus', 'new_bus'}
assert schedule['service'].modes() == {'bus', 'new_bus'}
assert schedule.mode_graph_map() == {'bus': {('7', '8'), ('6', '7'), ('5', '6')},
'new_bus': {('3', '4'), ('1', '2'), ('2', '3')}}
assert schedule['service'].mode_graph_map() == {'bus': {('6', '7'), ('7', '8'), ('5', '6')},
'new_bus': {('3', '4'), ('2', '3'), ('1', '2')}}
def test_applying_attributes_changing_id_to_route_throws_error(schedule):
assert '1' in schedule._graph.graph['routes']
assert schedule._graph.graph['routes']['1']['id'] == '1'
assert schedule.route('1').id == '1'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'1': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_stop(schedule):
assert schedule._graph.nodes['5']['name'] == ''
assert schedule.stop('5').name == ''
schedule.apply_attributes_to_stops({'5': {'name': 'new_name'}})
assert schedule._graph.nodes['5']['name'] == 'new_name'
assert schedule.stop('5').name == 'new_name'
def test_applying_attributes_changing_id_to_stop_throws_error(schedule):
assert '5' in schedule._graph.nodes
assert schedule._graph.nodes['5']['id'] == '5'
assert schedule.stop('5').id == '5'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'5': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def change_name(attrib):
return 'new_name'
def test_applying_function_to_services(schedule):
schedule.apply_function_to_services(function=change_name, location='name')
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_function_to_routes(schedule):
schedule.apply_function_to_routes(function=change_name, location='route_short_name')
for route in schedule.routes():
assert schedule._graph.graph['routes'][route.id]['route_short_name'] == 'new_name'
assert route.route_short_name == 'new_name'
def test_applying_function_to_stops(schedule):
schedule.apply_function_to_stops(function=change_name, location='name')
for stop in schedule.stops():
assert stop.name == 'new_name'
assert schedule._graph.nodes[stop.id]['name'] == 'new_name'
def test_adding_service(schedule, service):
service.reindex('different_service')
service.route('1').reindex('different_service_1')
service.route('2').reindex('different_service_2')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_route_ids(schedule, service):
service.reindex('different_service')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_id_throws_error(schedule, service):
with pytest.raises(ServiceIndexError) as e:
schedule.add_service(service)
assert 'already exists' in str(e.value)
def test_adding_service_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service', 'some_id'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
s = Service(id='some_id', routes=[r])
schedule.add_service(s, force=True)
assert_semantically_equal(dict(s.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(s.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'some_id', 'service'}})
assert_semantically_equal(s.graph()['2']['5'], {'routes': {'3'}, 'services': {'some_id'}})
def test_adding_service_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_service(Service(id='some_id', routes=[r]))
assert 'The following stops would inherit data' in str(e.value)
def test_removing_service(schedule):
schedule.remove_service('service')
assert not set(schedule.route_ids())
assert not set(schedule.service_ids())
assert not schedule._graph.graph['route_to_service_map']
assert not schedule._graph.graph['service_to_route_map']
def test_adding_route(schedule, route):
route.reindex('new_id')
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'new_id'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'new_id': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'new_id']})
def test_adding_route_with_clashing_id(schedule, route):
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'service_3'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'service_3': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'service_3']})
def test_adding_route_to_non_existing_service_throws_error(schedule, route):
with pytest.raises(ServiceIndexError) as e:
schedule.add_route('service_that_doesnt_exist', route)
assert 'does not exist' in str(e.value)
def test_creating_a_route_to_add_using_id_references_to_existing_stops_inherits_schedule_stops_data(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['1', '2', '5']
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}}, '2': {'routes': {'3'}}, '5': {'routes': {'3'}}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_creating_a_route_to_add_giving_existing_schedule_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[schedule.stop('1'), schedule.stop('2'), schedule.stop('5')]
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'routes': {'3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'5': {'routes': {'3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
schedule.add_route('service', r, force=True)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_only_flags_those_that_are_actually_different(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='')]
)
assert r.ordered_stops == ['1', '2', '5']
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert "The following stops would inherit data currently stored under those Stop IDs in the Schedule: " \
"['1', '2']" in str(e.value)
def test_adding_route_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert 'The following stops would inherit data' in str(e.value)
def test_extracting_epsg_from_an_intermediate_route_gives_none():
# intermediate meaning not belonging to a schedule yet but referring to stops in a schedule
r = Route(
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['S1', 'S2', 'S3']
)
assert r.epsg is None
def test_removing_route(schedule):
schedule.remove_route('2')
assert set(schedule.route_ids()) == {'1'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1']})
def test_removing_route_updates_services_on_nodes_and_edges(schedule):
schedule.remove_route('2')
assert_semantically_equal(dict(schedule.graph().nodes(data=True)),
{'5': {'services': set(), 'routes': set(), 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set()},
'6': {'services': set(), 'routes': set(), 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set()},
'7': {'services': set(), 'routes': set(), 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set()},
'8': {'services': set(), 'routes': set(), 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76683608549253,
'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76682779861249,
'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766825803756994,
'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766856648946295,
'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()}})
assert_semantically_equal(schedule.graph().edges(data=True)._adjdict,
{'5': {'6': {'services': set(), 'routes': set()}},
'6': {'7': {'services': set(), 'routes': set()}},
'7': {'8': {'services': set(), 'routes': set()}}, '8': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}, '4': {}})
def test_removing_stop(schedule):
schedule.remove_stop('5')
assert {stop.id for stop in schedule.stops()} == {'1', '3', '4', '7', '8', '6', '2'}
def test_removing_unused_stops(schedule):
schedule.remove_route('1')
schedule.remove_unsused_stops()
assert {stop.id for stop in schedule.stops()} == {'6', '8', '5', '7'}
def test_iter_stops_returns_stops_objects(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4326')
assert set([stop.id for stop in schedule.stops()]) == {'0', '1', '2', '3', '4'}
assert all([isinstance(stop, Stop) for stop in schedule.stops()])
def test_read_matsim_schedule_returns_expected_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
epsg='epsg:27700')
correct_services = Service(id='10314', routes=[
Route(
route_short_name='12', id='VJbd8660f05fe6f744e58a66ae12bd66acbca88b98',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_0_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])
for val in schedule.services():
assert val == correct_services
assert_semantically_equal(schedule.stop_to_service_ids_map(),
{'26997928P.link:1': {'10314'}, '26997928P': {'10314'}})
assert_semantically_equal(schedule.stop_to_route_ids_map(),
{'26997928P': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'26997928P.link:1': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'}})
assert_semantically_equal(schedule.route('VJbd8660f05fe6f744e58a66ae12bd66acbca88b98').trips,
{'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'], 'vehicle_id': ['veh_0_bus']})
assert_semantically_equal(
dict(schedule.graph().nodes(data=True)),
{'26997928P': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P', 'x': 528464.1342843144, 'y': 182179.7435136598, 'epsg': 'epsg:27700',
                     'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373, 'lon': -0.14967658860132668,
's2_id': 5221390302759871369, 'additional_attributes': {'name', 'isBlocking'},
'isBlocking': 'false'},
'26997928P.link:1': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P.link:1', 'x': 528464.1342843144, 'y': 182179.7435136598,
'epsg': 'epsg:27700', 'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373,
'lon': -0.14967658860132668, 's2_id': 5221390302759871369,
'additional_attributes': {'name', 'linkRefId', 'isBlocking'}, 'linkRefId': '1',
'isBlocking': 'false'}}
)
def test_reading_vehicles_with_a_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_reading_vehicles_after_reading_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_is_strongly_connected_with_strongly_connected_schedule(strongly_connected_schedule):
assert strongly_connected_schedule.is_strongly_connected()
def test_is_strongly_connected_with_not_strongly_connected_schedule(schedule):
assert not schedule.is_strongly_connected()
def test_has_self_loops_with_self_has_self_looping_schedule(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
assert s.has_self_loops()
def test_has_self_loops_returns_self_looping_stops(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
loop_nodes = s.has_self_loops()
assert loop_nodes == ['1']
def test_has_self_loops_with_non_looping_routes(schedule):
assert not schedule.has_self_loops()
def test_validity_of_services(self_looping_route, route):
s = Schedule('epsg:27700', [Service(id='1', routes=[self_looping_route]),
Service(id='2', routes=[route])])
assert not s['1'].is_valid_service()
assert s['2'].is_valid_service()
assert set(s.validity_of_services()) == {False, True}
def test_has_valid_services(schedule):
assert not schedule.has_valid_services()
def test_has_valid_services_with_only_valid_services(service):
s = Schedule('epsg:27700', [service])
assert s.has_valid_services()
def test_invalid_services_shows_invalid_services(service):
for route_id in service.route_ids():
service._graph.graph['routes'][route_id]['route'] = ['1']
s = Schedule('epsg:27700', [service])
assert s.invalid_services() == [service]
def test_is_valid_with_valid_schedule(service):
s = Schedule('epsg:27700', [service])
assert s.is_valid_schedule()
def test_generate_validation_report_delegates_to_method_in_schedule_operations(mocker, schedule):
mocker.patch.object(schedule_validation, 'generate_validation_report')
schedule.generate_validation_report()
schedule_validation.generate_validation_report.assert_called_once()
def test_build_graph_builds_correct_graph(strongly_connected_schedule):
g = strongly_connected_schedule.graph()
assert_semantically_equal(dict(g.nodes(data=True)),
{'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_5'},
'2': {'services': {'service'}, 'routes': {'1', '2'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set(), 'name': 'Stop_2'},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_7'},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_8'},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_3'},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_1'},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_4'}})
assert_semantically_equal(g.edges(data=True)._adjdict,
{'5': {'2': {'services': {'service'}, 'routes': {'2'}}},
'2': {'7': {'services': {'service'}, 'routes': {'2'}},
'3': {'services': {'service'}, 'routes': {'1'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}},
'8': {'5': {'services': {'service'}, 'routes': {'2'}}},
'4': {'1': {'services': {'service'}, 'routes': {'1'}}},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}}})
def test_building_trips_dataframe(schedule):
df = schedule.route_trips_with_stops_to_dataframe()
correct_df = DataFrame({'departure_time': {0: Timestamp('1970-01-01 13:00:00'), 1: Timestamp('1970-01-01 13:05:00'),
2: Timestamp('1970-01-01 13:09:00'), 3: Timestamp('1970-01-01 13:30:00'),
4: Timestamp('1970-01-01 13:35:00'), 5: Timestamp('1970-01-01 13:39:00'),
6: Timestamp('1970-01-01 11:00:00'), 7: Timestamp('1970-01-01 11:05:00'),
8: Timestamp('1970-01-01 11:09:00'), 9: Timestamp('1970-01-01 13:00:00'),
10: Timestamp('1970-01-01 13:05:00'),
11: Timestamp('1970-01-01 13:09:00')},
'arrival_time': {0: Timestamp('1970-01-01 13:03:00'), 1: Timestamp('1970-01-01 13:07:00'),
2: Timestamp('1970-01-01 13:13:00'), 3: Timestamp('1970-01-01 13:33:00'),
4: Timestamp('1970-01-01 13:37:00'), 5: Timestamp('1970-01-01 13:43:00'),
6: Timestamp('1970-01-01 11:03:00'), 7: Timestamp('1970-01-01 11:07:00'),
8: Timestamp('1970-01-01 11:13:00'), 9: Timestamp('1970-01-01 13:03:00'),
10: Timestamp('1970-01-01 13:07:00'),
11: Timestamp('1970-01-01 13:13:00')},
'from_stop': {0: '1', 1: '2', 2: '3', 3: '1', 4: '2', 5: '3', 6: '5', 7: '6', 8: '7',
9: '5', 10: '6', 11: '7'},
'to_stop': {0: '2', 1: '3', 2: '4', 3: '2', 4: '3', 5: '4', 6: '6', 7: '7', 8: '8', 9: '6',
10: '7', 11: '8'},
'trip': {0: '1', 1: '1', 2: '1', 3: '2', 4: '2', 5: '2', 6: '1', 7: '1', 8: '1', 9: '2',
10: '2', 11: '2'},
'vehicle_id': {0: 'veh_1_bus', 1: 'veh_1_bus', 2: 'veh_1_bus', 3: 'veh_2_bus',
4: 'veh_2_bus', 5: 'veh_2_bus', 6: 'veh_3_bus', 7: 'veh_3_bus',
8: 'veh_3_bus', 9: 'veh_4_bus', 10: 'veh_4_bus', 11: 'veh_4_bus'},
'route': {0: '1', 1: '1', 2: '1', 3: '1', 4: '1', 5: '1', 6: '2', 7: '2', 8: '2', 9: '2',
10: '2', 11: '2'},
'route_name': {0: 'name', 1: 'name', 2: 'name', 3: 'name', 4: 'name', 5: 'name',
6: 'name_2', 7: 'name_2', 8: 'name_2', 9: 'name_2', 10: 'name_2',
11: 'name_2'},
'mode': {0: 'bus', 1: 'bus', 2: 'bus', 3: 'bus', 4: 'bus', 5: 'bus', 6: 'bus', 7: 'bus',
8: 'bus', 9: 'bus', 10: 'bus', 11: 'bus'},
'from_stop_name': {0: '', 1: '', 2: '', 3: '', 4: '', 5: '', 6: '', 7: '', 8: '', 9: '',
10: '', 11: ''},
'to_stop_name': {0: '', 1: '', 2: '', 3: '', 4: '', 5: '', 6: '', 7: '', 8: '', 9: '',
10: '', 11: ''},
'service': {0: 'service', 1: 'service', 2: 'service', 3: 'service', 4: 'service',
5: 'service', 6: 'service', 7: 'service', 8: 'service', 9: 'service',
10: 'service', 11: 'service'},
'service_name': {0: 'name', 1: 'name', 2: 'name', 3: 'name', 4: 'name', 5: 'name',
6: 'name', 7: 'name', 8: 'name', 9: 'name', 10: 'name',
11: 'name'}}).sort_values(
by=['route', 'trip', 'departure_time']).reset_index(drop=True)
assert_frame_equal(df.sort_index(axis=1), correct_df.sort_index(axis=1))
def test_generating_vehicles(schedule):
schedule.generate_vehicles()
assert_semantically_equal(schedule.vehicles, {'veh_3_bus': {'type': 'bus'}, 'veh_2_bus': {'type': 'bus'},
'veh_1_bus': {'type': 'bus'}, 'veh_4_bus': {'type': 'bus'}})
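# Tests below that take `mocker` patch DataFrame.drop, which generate_vehicles appears
# to rely on internally, so a fixed vehicle_id/type table is returned and the expected
# vehicles dict is deterministic.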
def test_generating_vehicles_with_shared_vehicles_and_consistent_modes(mocker, schedule):
schedule.vehicles = {}
mocker.patch.object(DataFrame, 'drop',
return_value=DataFrame({'vehicle_id': ['v_1', 'v_2', 'v_1', 'v_2', 'v_3'],
'type': ['bus', 'bus', 'bus', 'bus', 'rail']}))
schedule.generate_vehicles()
assert_semantically_equal(schedule.vehicles, {'v_1': {'type': 'bus'}, 'v_2': {'type': 'bus'},
'v_3': {'type': 'rail'}})
def test_generating_additional_vehicles_by_default(schedule):
r = Route(
route_short_name='N55',
mode='bus',
trips={'trip_id': ['some_trip_1'],
'trip_departure_time': ['16:23:00'],
'vehicle_id': ['some_bus_2']},
arrival_offsets=['00:00:00', '00:06:00'],
departure_offsets=['00:00:00', '00:06:00'],
id='new',
stops=[schedule.stop('1'),
schedule.stop('3')]
)
schedule.add_route('service', r)
# change existing vehicle types to be different from mode to test whether they are regenerated with default
# mode type
schedule.vehicles = {'veh_3_bus': {'type': '_bus'}, 'veh_4_bus': {'type': '_bus'}, 'veh_1_bus': {'type': '_bus'},
'veh_2_bus': {'type': '_bus'}}
schedule.generate_vehicles()
assert_semantically_equal(schedule.vehicles, {'veh_3_bus': {'type': '_bus'}, 'veh_4_bus': {'type': '_bus'},
'veh_1_bus': {'type': '_bus'}, 'veh_2_bus': {'type': '_bus'},
'some_bus_2': {'type': 'bus'}})
def test_generating_new_vehicles_with_overwrite_True(schedule):
# change existing vehicle types to be different from mode to test whether they are regenerated with default
# mode type
schedule.vehicles = {'veh_3_bus': {'type': '_bus'}, 'veh_4_bus': {'type': '_bus'}, 'veh_1_bus': {'type': '_bus'},
'veh_2_bus': {'type': '_bus'}}
schedule.generate_vehicles(overwrite=True)
assert_semantically_equal(schedule.vehicles, {'veh_3_bus': {'type': 'bus'}, 'veh_4_bus': {'type': 'bus'},
'veh_1_bus': {'type': 'bus'}, 'veh_2_bus': {'type': 'bus'}})
def test_rejects_inconsistent_modes_when_generating_vehicles(mocker, schedule):
mocker.patch.object(DataFrame, 'drop',
return_value=DataFrame({'vehicle_id': ['v_1', 'v_2', 'v_1', 'v_3', 'v_3'],
'type': ['bus', 'bus', 'rail', 'rail', 'rail']}))
with pytest.raises(InconsistentVehicleModeError) as e:
schedule.generate_vehicles()
assert "{'v_1': ['bus', 'rail']}" in str(e.value)
def test_generating_route_trips_dataframe(schedule):
df = schedule.route_trips_to_dataframe(gtfs_day='19700102')
assert_frame_equal(df.sort_index(axis=1), DataFrame(
{'service_id': {0: 'service', 1: 'service', 2: 'service', 3: 'service'},
'route_id': {0: '2', 1: '2', 2: '1', 3: '1'}, 'trip_id': {0: '1', 1: '2', 2: '1', 3: '2'},
'trip_departure_time': {0: Timestamp('1970-01-02 11:00:00'), 1: | Timestamp('1970-01-02 13:00:00') | pandas.Timestamp |
import anndata as ad
import logging
import numpy as np
import os
import time
import pandas as pd
import yaml
from pathlib import Path
from collections import namedtuple
from const import PATH, OUT_PATH
#logging.basicConfig(level=logging.INFO)
try:
import git
except ImportError:
pass
def get_tasks(phase):
assert phase in ['phase1v2','phase2']
tasks = [
"GEX2ADT",
"ADT2GEX",
"GEX2ATAC",
"ATAC2GEX"
]
task2name = {
"ADT2GEX":f"openproblems_bmmc_cite_{phase}_mod2",
"GEX2ADT":f"openproblems_bmmc_cite_{phase}_rna",
"ATAC2GEX":f"openproblems_bmmc_multiome_{phase}_mod2",
"GEX2ATAC":f"openproblems_bmmc_multiome_{phase}_rna"
}
return tasks, task2name
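# Map a dataset path to the expected output dimensionality (number of target features)
# and the corresponding task name.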
def get_y_dim(data_path):
if '_cite_' in data_path:
if 'mod2' in data_path:
return 13953,"ADT2GEX"
elif 'rna' in data_path:
return 134,"GEX2ADT"
else:
assert 0
elif '_multiome_' in data_path:
if 'mod2' in data_path:
return 13431,"ATAC2GEX"
elif 'rna' in data_path:
return 10000,"GEX2ATAC"
else:
assert 0
def get_par(path,phase):
par = {
"input_solution" : f"{path}/datasets_{phase}/predict_modality",
"input_prediction" : f"{path}/predictions/predict_modality",
}
return par
def get_train_test_paths(name,phase,path = "./output"):
par = get_par(path,phase)
train_mod1 = f"{par['input_solution']}/{name}/{name}.censor_dataset.output_train_mod1.h5ad"
train_mod2 = train_mod1.replace('mod1','mod2')
test_mod1 = train_mod1.replace('train','test')
test_mod2 = test_mod1.replace('mod1','mod2')
assert os.path.exists(train_mod1) and os.path.exists(train_mod2)
if phase == 'phase1v2':
assert os.path.exists(test_mod1) and os.path.exists(test_mod2)
return train_mod1,train_mod2,test_mod1,test_mod2
def get_data_paths(task,phase,data_type='train_test',path='./output'):
assert data_type in ['train_test','gt_pred']
tasks, task2name = get_tasks(phase)
name = task2name[task]
if data_type == 'train_test':
return get_train_test_paths(name,phase,path)
else:
return get_gt_pred_paths(name,path)
def get_gt_pred_paths(name,path = "./output"):
par = get_par(path,'phase1v2')
gt = f"{par['input_solution']}/{name}/{name}.censor_dataset.output_test_mod2.h5ad"
pred = f"{par['input_prediction']}/{name}/{name}.method.output.h5ad"
print(gt)
print(pred)
assert os.path.exists(gt) and os.path.exists(pred)
return gt, pred
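# Score a single task: load the ground-truth and predicted AnnData files, check that
# they refer to the same dataset and shape, and return the RMSE between the matrices.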
def eval_one_file(name):
gt, pred = get_gt_pred_paths(name)
logging.info("Reading solution file")
ad_sol = ad.read_h5ad(gt)
logging.info("Reading prediction file")
ad_pred = ad.read_h5ad(pred)
logging.info("Check prediction format")
if ad_sol.uns["dataset_id"] != ad_pred.uns["dataset_id"]:
raise ValueError("Prediction and solution have differing dataset_ids")
if ad_sol.shape != ad_pred.shape:
raise ValueError("Dataset and prediction anndata objects should have the same shape / dimensions.")
logging.info("Computing MSE metrics")
tmp = ad_sol.X - ad_pred.X
rmse = np.sqrt(tmp.power(2).mean())
mae = np.abs(tmp).mean()
return rmse
def eval_all():
start = time.time()
tasks, task2name = get_tasks(phase='phase1v2')
s = 0
res = {}
for task in tasks:
name = task2name[task]
score = eval_one_file(name)
s += score
res[task] = score
res['overall'] = s/len(tasks)
print_res(res)
duration = time.time() - start
logging.critical(f" Total time: {duration:.1f} seconds")
def print_res(res):
for i,j in res.items():
logging.critical(f" {i} {j:.4f}")
def check_column_mean_var_all(path='./output',phase='phase2'):
tasks, task2name = get_tasks(phase=phase)
if phase == 'phase2':
names = ['train_mod1', 'train_mod2']
else:
names = ['train_mod1', 'train_mod2', 'test_mod1', 'test_mod2']
logging.info("[min, max, mean]")
res = []
ms = []
ns = []
for task in tasks:
data_names = get_data_paths(task,phase=phase,path=path)
logging.info(f"task:{task}")
for d,n in zip(data_names, names):
logging.info(n)
data = ad.read_h5ad(d)
msg,dd = check_column_mean_var(data)
logging.info('\n'+msg)
res.append(dd)
ms.append(task)
ns.append(n)
dg = pd.DataFrame({'task':ms,'type':ns})
res = np.array(res)
c1 = ['mu','var']
c2 = ['min','max','mean']
df = pd.DataFrame(res,columns = [f'{i}_{j}' for i in c1 for j in c2]+['rows','cols'])
df = pd.concat([dg,df],axis=1)
return df
def check_column_mean_var(data):
x = data.X
mu = x.mean(axis=0)
msg = f"mean {mu.min():.3f}, {mu.max():.3f}, {mu.mean():.3f}\n"
u2 = (x.multiply(x)).mean(axis=0)
var = u2 - np.multiply(mu,mu)
msg += f"var {var.min():.3f}, {var.max():.3f}, {var.mean():.3f}\n"
d = [mu.min(),mu.max(),mu.mean(),var.min(),var.max(),var.mean(),x.shape[0],x.shape[1]]
return msg,np.array(d)
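# Batch labels are assumed to encode site and donor, e.g. 's1d2': the first two
# characters are the site, the remainder the donor.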
def to_site_donor(data):
df = data.obs['batch'].copy().to_frame().reset_index()
df.columns = ['index','batch']
df['site'] = df['batch'].apply(lambda x: x[:2])
df['donor'] = df['batch'].apply(lambda x: x[2:])
return df
def get_batch_count_df(data):
    df = to_site_donor(data)
    # to_site_donor() returns no 'count' column; derive per-batch cell counts first
    # (assumed intent, since the site/donor aggregations below sum a 'count' column)
    df = df.groupby(['batch', 'site', 'donor']).size().reset_index(name='count')
    ds = df[['site','count']].groupby('site').agg({'count':'sum'})
    ds = ds.reset_index()
    dd = df[['donor','count']].groupby('donor').agg({'count':'sum'})
    dd = dd.reset_index()
    return df.drop(['site','donor'],axis=1), ds, dd
def get_batch_count_df_all(path,phase='phase2'):
tasks, task2name = get_tasks(phase=phase)
names = ['train_mod1', 'train_mod2', 'test_mod1', 'test_mod2']
if phase == 'phase2':
names = ['train_mod1']
else:
names = ['train_mod1', 'test_mod1']
res = []
for task in tasks:
data_names = get_data_paths(task,phase=phase,path=path)
data_names = [data_names[0],data_names[2]]
logging.info(f"task:{task}")
for d,n in zip(data_names, names):
data = ad.read_h5ad(d)
dfs = get_batch_count_df(data)
for i in dfs:
i['type'] = n
i['task'] = task
res.append(dfs)
df = pd.concat([i[0] for i in res],axis=0).set_index(['batch','type','task']).unstack(-1).fillna(0)
ds = pd.concat([i[1] for i in res],axis=0).set_index(['site','type','task']).unstack(-1).fillna(0)
du = | pd.concat([i[2] for i in res],axis=0) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use("svg")
import matplotlib.pyplot as plt
import seaborn as sns
from seaborn.palettes import blend_palette
from seaborn.utils import set_hls_values
def ci_error(lower, upper, truth):
below = np.maximum(lower - truth, 0)
above = np.maximum(truth - upper, 0)
return below + above
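# `snakemake` below is not imported: Snakemake injects a `snakemake` object (input,
# params, wildcards, ...) when this script is run through a `script:` directive.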
epsilon = 0.001
sns.set(style="ticks", palette="colorblind", context=snakemake.wildcards.context)
codebook = pd.read_table(snakemake.input.codebook, index_col=0)
errors = []
counts = []
ci_errors = []
for i, (mean, posterior_counts, raw_counts, known_counts) in enumerate(zip(
snakemake.params.means,
snakemake.input.posterior_counts,
snakemake.input.raw_counts,
snakemake.input.known_counts)):
posterior_estimates = | pd.read_table(posterior_counts, index_col=[0, 1]) | pandas.read_table |
#!/usr/bin/env python
import pandas as pd
import numpy as np
import multiprocessing
import argparse,operator,os,random,sys,time
import random,subprocess
import pysam
import collections
import warnings
import math
def parseargs():
parser=argparse.ArgumentParser(description="Calculate read features")
parser.add_argument('--bam',help='index bam file for alignment')
parser.add_argument('--output',help='output directory for MetaREA')
parser.add_argument("--mlen",type=int, default=5000,help='minimum contig length [default: 5000bp]')
args=parser.parse_args()
return args
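# Slide a 100 bp window along the contig, starting 300 bp in and stopping once fewer
# than 300 bp remain, counting proper pairs, same-orientation (inversion-like) pairs,
# heavily clipped reads and supplementary alignments in each window.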
def window_read_cal(samfile,ref,lens):
read_count={"start_pos":[],"proper_count":[],"inversion_count":[],"clipped_count":[],"supplementary_count":[],"read_count":[]}
for i in range(300,lens,100):
start=i
end=i+100
read_count["start_pos"].append(i)
proper_num=0
inversion_num=0
clipped_num=0
supplementary_num=0
readcount=0
for read in samfile.fetch(ref,start,end):
proper_num+=(read.rnext==read.tid and read.is_proper_pair)
inversion_num+=(read.rnext==read.tid and read.is_paired and ((read.is_reverse)+(read.mate_is_reverse)) != 1)
clipped_num+=(read.get_cigar_stats()[0][4] > 20)
supplementary_num+=(read.is_supplementary and read.get_cigar_stats()[0][5]>20)
readcount+=1
read_count["read_count"].append(readcount)
read_count["proper_count"].append(proper_num)
read_count["inversion_count"].append(inversion_num)
read_count["clipped_count"].append(clipped_num)
read_count["supplementary_count"].append(supplementary_num)
if (lens-end) < 300:
break
return read_count
def read_cal(args,samfile):
if os.path.exists(os.path.join(args.output, "temp/read_feature/read_feature.txt")):
return 0
references=samfile.references
lengths=samfile.lengths
read_store={"contig":[],"start_pos":[],"read_count":[],"proper_read_count":[],
"inversion_read_count":[],"clipped_read_count":[],"supplementary_read_count":[],"length":[]}
i=0
for ref,lens in zip(references,lengths):
print(i)
i+=1
if lens < args.mlen:
continue
read_count=window_read_cal(samfile,ref,lens)
read_store["contig"].extend([ref]*len(read_count["start_pos"]))
read_store["start_pos"].extend(read_count["start_pos"])
read_store["read_count"].extend(read_count["read_count"])
read_store["proper_read_count"].extend(read_count["proper_count"])
read_store["inversion_read_count"].extend(read_count["inversion_count"])
read_store["clipped_read_count"].extend(read_count["clipped_count"])
read_store["supplementary_read_count"].extend(read_count["supplementary_count"])
read_store["length"].extend([lens]*len(read_count["start_pos"]))
data= | pd.DataFrame(read_store) | pandas.DataFrame |
'''
Created on 28.04.2016
@author: lemmerfn
'''
from abc import ABC, abstractmethod
import weakref
from functools import total_ordering
import pandas as pd
import pysubgroup as ps
from itertools import chain
import copy
import numpy as np
@total_ordering
class SelectorBase(ABC):
__refs__ = weakref.WeakSet()
def __new__(cls, *args, **kwargs):
tmp = super().__new__(cls)
tmp.set_descriptions(*args, **kwargs)
if tmp in SelectorBase.__refs__:
            for ref in SelectorBase.__refs__:
if ref == tmp:
return ref
return tmp
def __init__(self):
SelectorBase.__refs__.add(self)
def __eq__(self, other):
if other is None:
return False
return repr(self) == repr(other)
def __lt__(self, other):
return repr(self) < repr(other)
def __hash__(self):
return self._hash #pylint: disable=no-member
@abstractmethod
def set_descriptions(self, *args, **kwargs):
pass
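# A "subgroup" can arrive in several representations: an object carrying its own bit
# representation, a slice, a numpy boolean/integer index array, or a description that
# computes its cover on a DataFrame. The helpers below normalise those cases.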
def get_cover_array_and_size(subgroup, data_len=None, data=None):
if hasattr(subgroup, "representation"):
cover_arr = subgroup
size = subgroup.size_sg
elif isinstance(subgroup, slice):
cover_arr = subgroup
if data_len is None:
if isinstance(data, pd.DataFrame):
data_len = len(data)
else:
raise ValueError("if you pass a slice, you need to pass either data_len or data")
# https://stackoverflow.com/questions/36188429/retrieve-length-of-slice-from-slice-object-in-python
size = len(range(*subgroup.indices(data_len)))
elif hasattr(subgroup, '__array_interface__'):
cover_arr = subgroup
type_char = subgroup.__array_interface__['typestr'][1]
if type_char == 'b': # boolean indexing is used
size = np.count_nonzero(cover_arr)
elif type_char == 'u' or type_char == 'i': # integer indexing
size = subgroup.__array_interface__['shape'][0]
else:
print(type_char)
raise NotImplementedError(f"Currently a typechar of {type_char} is not supported.")
else:
assert isinstance(data, pd.DataFrame)
cover_arr = subgroup.covers(data)
size = np.count_nonzero(cover_arr)
return cover_arr, size
def get_size(subgroup, data_len=None, data=None):
if hasattr(subgroup, "representation"):
size = subgroup.size_sg
elif isinstance(subgroup, slice):
if data_len is None:
if isinstance(data, pd.DataFrame):
data_len = len(data)
else:
raise ValueError("if you pass a slice, you need to pass either data_len or data")
# https://stackoverflow.com/questions/36188429/retrieve-length-of-slice-from-slice-object-in-python
size = len(range(*subgroup.indices(data_len)))
elif hasattr(subgroup, '__array_interface__'):
type_char = subgroup.__array_interface__['typestr'][1]
if type_char == 'b': # boolean indexing is used
size = np.count_nonzero(subgroup)
elif type_char == 'u' or type_char == 'i': # integer indexing
size = subgroup.__array_interface__['shape'][0]
else:
print(type_char)
raise NotImplementedError(f"Currently a typechar of {type_char} is not supported.")
else:
assert isinstance(data, pd.DataFrame)
size = np.count_nonzero(subgroup.covers(data))
return size
class EqualitySelector(SelectorBase):
def __init__(self, attribute_name, attribute_value, selector_name=None):
if attribute_name is None:
raise TypeError()
if attribute_value is None:
raise TypeError()
self._attribute_name = attribute_name
self._attribute_value = attribute_value
self._selector_name = selector_name
self.set_descriptions(self._attribute_name, self._attribute_value, self._selector_name)
super().__init__()
@property
def attribute_name(self):
return self._attribute_name
@property
def attribute_value(self):
return self._attribute_value
def set_descriptions(self, attribute_name, attribute_value, selector_name=None): # pylint: disable=arguments-differ
self._hash, self._query, self._string = EqualitySelector.compute_descriptions(attribute_name, attribute_value, selector_name=selector_name)
@classmethod
def compute_descriptions(cls, attribute_name, attribute_value, selector_name):
if isinstance(attribute_value, (str, bytes)):
query = str(attribute_name) + "==" + "'" + str(attribute_value) + "'"
elif np.isnan(attribute_value):
query = attribute_name + ".isnull()"
else:
query = str(attribute_name) + "==" + str(attribute_value)
if selector_name is not None:
string_ = selector_name
else:
string_ = query
hash_value = hash(query)
return (hash_value, query, string_)
def __repr__(self):
return self._query
def covers(self, data):
row = data[self.attribute_name].to_numpy()
if pd.isnull(self.attribute_value):
return | pd.isnull(row) | pandas.isnull |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# import csv
from operator import itemgetter
import numpy as np
import pandas as pd
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# from datetime import datetime
file5 = open('201811291521.txt', 'r')
dataset = file5.read()
print('data read')
dataset = dataset.split('\n')
data = []
for i in range(0, len(dataset)):
data.append(dataset[i].split('|'))
data = sorted(data, key=itemgetter(0))
data.pop(0)
print('data sorted')
headings = ['stime', 'etime', 'sip', 'sport', 'sipint', 'mac', 'osname', 'osversion', 'fingerprint', 'dip', 'dport', 'dipint', 'dstmac', 'rosname', 'rosversion', 'rfingerprint', 'protocol', 'pkts', 'bytes', 'rpkts', 'rbytes', 'dur', 'iflags', 'riflags', 'uflags', 'ruflags', 'entropy', 'rentropy', 'tos', 'rtos', 'application', 'vlanint', 'domain', 'endreason', 'hash']
print('data : '+str(len(data)))
# for item in data:
# item[0] = datetime.strptime(item[0], '%Y-%m-%d %H:%M:%S.%f')
# item[1] = datetime.strptime(item[1], '%Y-%m-%d %H:%M:%S.%f')
# print(item[0].strftime('%m/%d/%Y'))
data = np.array(data)
df = pd.DataFrame(data)
df.columns = headings
print('dataset created')
print(df.head())
# print(df['protocol'].nunique())
# In[3]:
edited_df = df.drop(['stime','etime','sipint','mac','osname','osversion','fingerprint','dipint','dstmac','rosname','rosversion','rfingerprint','iflags','riflags','uflags','ruflags','entropy','rentropy','tos','rtos','application','vlanint','domain','hash','pkts','bytes','rpkts','rbytes','dur','endreason'],axis=1)
print(edited_df.head())
# Get one hot encoding of columns B
one_hot = pd.get_dummies(edited_df['protocol'])
# Drop column B as it is now encoded
edited_df = edited_df.drop('protocol',axis = 1)
headers = []
for i in one_hot.columns:
headers.append('protocol_' + i)
# Join the encoded df
one_hot.columns = headers
# edited_df = edited_df.join(one_hot)
def correct_ip(s):
o = ''
if '.' in s:
for part in s.split('.'):
part = part.zfill(3)
o += part
else:
o = o.zfill(12)
o = o[:3] + '.' + o[3:]
o = o[:7] + '.' + o[7:]
o = o[:11] + '.' + o[11:]
return o
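# e.g. correct_ip('10.1.2.3') -> '010.001.002.003'; strings without '.' map to '000.000.000.000'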
def correct_port(s):
return(s.zfill(5))
sip_headers = []
dip_headers = []
for i in range(4):
sip_headers.append('sip_'+str(i))
dip_headers.append('dip_'+str(i))
sip = []
for ip in edited_df['sip']:
    # list() is needed under Python 3: map() is lazy, and DataFrame() below expects concrete 4-element rows
    sip.append(list(map(int, correct_ip(ip).split('.'))))
dip = []
for ip in edited_df['dip']:
    dip.append(list(map(int, correct_ip(ip).split('.'))))
sport = []
for port in edited_df['sport']:
sport.append(int(port))
dport = []
for port in edited_df['dport']:
dport.append(int(port))
# print(len(sip[0]))
# print(len(dip[0]))
# print(len(dport[0]))
# print(len(sport[0]))
sip_df = pd.DataFrame(sip,columns=sip_headers)
dip_df = pd.DataFrame(dip,columns=dip_headers)
sport_df = | pd.DataFrame(sport,columns=['sport']) | pandas.DataFrame |
import pandas as pd
import numpy as np
# Description of the table variables associated with the file 'data/tmdb_5000_movies.csv'
def descripcion_movies(tabla) -> pd.DataFrame:
"""
Breve descripción de la tabla en estudio:
qew: diccionario con definiciones acerca de variables de estudio
:return: dataframe resumen por columna con información acerca de
descripción de variables.
"""
qwe=pd.DataFrame(
{'Variable':[
'budget',
'genres',
'homepage',
'id',
'keywords',
'original_language',
'original_title',
'overview',
'popularity',
'production_companies',
'production_countries',
'release_date',
'revenue',
'runtime',
'spoken_languages',
'status',
'tagline',
'title',
'vote_average',
'vote_count'],
'Descripción':[
'El presupuesto en el que se realizó la película.',
'El género de la película, acción, comedia, suspenso, etc.',
'Un enlace a la página de inicio de la película.',
'Es el código de identificación de la película.',
'Las palabras clave o etiquetas relacionadas con la película.',
'El idioma en el que se hizo la película.',
'El título de la película antes de la traducción o adaptación.',
'Una breve descripción de la película.',
'Una cantidad numérica que especifica la popularidad de la película.',
'La casa productora de la película.',
'El país en el que se produjo.',
'La fecha en la que se publicó.',
'Los ingresos mundiales generados por la película.',
'El tiempo de ejecución de la película en minutos.',
'Otros idiomas en que se escucha la pelicula',
'"Liberado" o "Rumoreado".',
'Eslogan de la película.',
'título de la película.',
'Calificaciones promedio que recibió la película.',
'El recuento de votos recibidos.']})
qwe2=pd.DataFrame({
'Variable':[
'id',
'cast',
'crew'],
'Descripción':[
'Idenfificador único de la película',
'Conjunto de actores',
'El nombre del director, editor, compositor, escritor, etc.']
})
if tabla==1:
return qwe2
elif tabla==2:
return qwe
else:
""
def tabla_descripcion(df,descrip) -> pd.DataFrame:
"""
Resumir la información disponible en tabla
parametros:
df: dataframe con una serie de columnas
descrip:información acerca del significado de cada columna
:return: dataframe resumen por columna con información acerca de
descripción y tipo de dato por columna.
"""
tabla_resumen1= | pd.DataFrame({'Variable':df.columns,'Tipo de Dato':df.dtypes}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Apply and Visualize UMAP
#
# **<NAME>, 2019**
#
# We are interested in visualizing the relationship among samples according to several variables.
# These variables include `batch`, `dosage`, and `cell line`.
# In[1]:
import os
import numpy as np
import pandas as pd
import umap
import plotnine as gg
from pycytominer import feature_select
from pycytominer.cyto_utils import infer_cp_features
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
np.random.seed(123)
# In[4]:
def process_umap(data_df):
# Prepare UMAP input by removing metadata columns
metadata_cols = infer_cp_features(data_df, metadata=True)
metadata_df = data_df.loc[:, metadata_cols]
umap_data_df = data_df.drop(metadata_cols, axis="columns")
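    # UMAP is fit on feature columns only; the metadata columns are split off first
    # (presumably to label the embedding later).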
# Apply UMAP
reducer = umap.UMAP(random_state=123)
embedding = reducer.fit_transform(umap_data_df)
# Setup plotting logic
embedding_df = | pd.DataFrame(embedding, columns=['x', 'y']) | pandas.DataFrame |
"""
HEADER
CMS_data_analysis.py
File created to load the cms data and perform a few basic analyses on it.
Created on Thur Apr 2 16:07:40 2020
@author: <NAME>
"""
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Create a list of all the files we will work with
input_path = "./input/"
file_paths = ["cms_Animals.cpickle", "cms_Herds.cpickle", "cms_Milkings.cpickle"]
datasets = []
# Load each dataset in the folder and save to a pandas dataframe (df)
for i, file in enumerate(file_paths):
print(file)
datasets.append(pickle.load(open(input_path + file, "rb")))
# Print an overview of the dataset in the terminal
print("Loaded Dataset: " + file)
print(datasets[i].head())
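# With the file order above: datasets[0] = Animals, datasets[1] = Herds, datasets[2] = Milkings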
# First try looking at the milking data values for a single cow
cms_Milkings = datasets[2]
# Find the region in the df corresponding to the first cow & make sure that
# the data is properly sorted by date
animal1 = cms_Milkings.copy()[cms_Milkings["cca_id"] == 6688]
animal1["milkng_date"] = | pd.to_datetime(animal1["milkng_date"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
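# These tests exercise pandas' low-level matching / factorization / uniqueness
# algorithms directly (pandas.core.algorithms and the Cython hashtable layer).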
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = | algos.unique(arr) | pandas.core.algorithms.unique |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import datetime
from datetime import date, timedelta
from dateutil.parser import parse
from nltk.tokenize import RegexpTokenizer
class MetadataCleaner():
    '''
    Static helpers for cleaning a metadata dataframe
    (rating counts, duration, date, sport category, like percentage).
    '''
@staticmethod
def clean(meta_df):
        '''
        Clean the metadata: keep rows with at least 50 ratings, convert duration to
        seconds, expand the date into numeric features, drop duplicate ids, bin
        like_pct into three classes (cut-offs at the 20th/80th percentiles) and
        one-hot encode the sport column.
        '''
m_df = meta_df.copy()
# Find videos with more than 50 ratings
meta_df = meta_df[meta_df['ratingCount'] >= 50].copy()
## Translate duration into seconds
meta_df['duration'] = meta_df['duration'].apply(MetadataCleaner._to_seconds)
# Translate date into a numeric format, and add these as features
date_df = meta_df['date'].apply(MetadataCleaner.get_date_features)
meta_df = pd.concat([meta_df, date_df], axis=1)
# Drop duplicate columns
meta_df = meta_df.drop_duplicates('id')
# Create target variables
# Creating three classes: good, average, bad
# Let's use the <20, 20-80, >80 as the bins
cutoff_lo, cutoff_hi = MetadataCleaner.get_percentiles(meta_df['like_pct'], 20, 80)
meta_df['target_class'] = meta_df['like_pct'].apply(lambda x: MetadataCleaner.target_bin(x, cutoff_lo, cutoff_hi))
# Get dummy variables for categories
meta_df = | pd.get_dummies(meta_df, prefix='sport', columns=['sport']) | pandas.get_dummies |
import errno
import logging
import os
import pandas as pd
from xlrd.biffh import XLRDError
import uuid
import shutil
import math
import json
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.kb_GenericsReportClient import kb_GenericsReport
from installed_clients.GenericsAPIClient import GenericsAPI
from installed_clients.WsLargeDataIOClient import WsLargeDataIO
DATA_EPISTEMOLOGY = ['measured', 'asserted', 'predicted']
PROFILE_CATEGORY = ['community', 'organism']
PROFILE_TYPE = ['amplicon', 'mg', 'modelset']
class ProfileImporter:
@staticmethod
def _mkdir_p(path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
@staticmethod
def _validate_params(params, expected, opt_param=set()):
"""Validates that required parameters are present. Warns if unexpected parameters appear"""
expected = set(expected)
opt_param = set(opt_param)
pkeys = set(params)
if expected - pkeys:
raise ValueError("Required keys {} not in supplied parameters"
.format(", ".join(expected - pkeys)))
defined_param = expected | opt_param
for param in params:
if param not in defined_param:
logging.warning("Unexpected parameter {} supplied".format(param))
@staticmethod
def _convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def _calculate_object_size(self, func_profile_data):
json_size = 0
try:
logging.info('start calculating object size')
json_object = json.dumps(func_profile_data).encode("utf-8")
json_size = len(json_object)
size_str = self._convert_size(json_size)
logging.info('serialized object JSON size: {}'.format(size_str))
except Exception:
logging.info('failed to calculate object size')
return json_size
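    # _file_to_df below expects an Excel workbook with the profile matrix on a sheet
    # named 'data', using the first column as the row index.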
@staticmethod
def _file_to_df(file_path):
logging.info('start parsing file content to data frame')
try:
df = | pd.read_excel(file_path, sheet_name='data', index_col=0) | pandas.read_excel |
import streamlit as st
import pandas as pd
import altair as alt
import pickle
import numpy as np
from map import create_map
from airdata import AirData
from utils import parse_time, parse_time_hms
from vega_datasets import data
#st.set_page_config(layout="wide")
# Getting data ready, Refresh every hour (same data when user refreshes within an hour)
@st.cache(ttl=60 * 60, suppress_st_warning=True)
def get_AD_data():
ad = AirData()
flight_df = ad.get_flights_df()
flight_df = ad.add_time_to_df(flight_df)
return ad, flight_df
# Cache to prevent computation on every rerun
@st.cache
def save_AD_data(df):
return df.to_csv().encode('utf-8')
ad, flight_df = get_AD_data()
# Definitions for flight delay
## Prepare data
# load in files
origin = pickle.load(open('flight-price/DestState.sav','rb'))
dest = pickle.load(open('flight-price/DestState.sav','rb'))
air = pickle.load(open('flight-price/AirlineCompany.sav','rb'))
miles_dic = pickle.load(open('flight-price/miles_dic.sav','rb'))
quarter_dic= {'Spring':'Q1','Summer':'Q2','Fall':'Q3','Winter':'Q4'}
df_viz = pd.read_csv('flight-price/df_viz.csv').iloc[:,:]
# fit the prediction model, get prediction and prediction interval
def get_pi(X):
all_models = pickle.load(open('flight-price/all_models.sav', 'rb'))
lb = all_models[0].predict(X)
pred = all_models[2].predict(X)
ub = all_models[1].predict(X)
return (round(np.exp(lb[0]),2), round(np.exp(pred[0]),2), round(np.exp(ub[0]),2))
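# all_models is assumed to hold three regressors fitted on log(price): index 0 the
# lower bound, index 1 the upper bound and index 2 the point estimate, hence the
# np.exp back-transform to the original fare scale.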
# load data for non ML visual
def load_data_viz():
return pd.read_csv('flight-price/train_viz.csv').iloc[:,:]
# visual for price comparison
@st.cache
def get_slice_ogstate(df, ogstate=None):
labels = pd.Series([1] * len(df), index=df.index)
labels &= df['OriginState'] == ogstate
return labels
def get_slice_destate(df, destate=None):
labels = pd.Series([1] * len(df), index=df.index)
labels &= df['DestState'] == destate
return labels
def get_slice_membership(df, ogstate=None, destate=None, quarter=None,airline=None):
labels = pd.Series([1] * len(df), index=df.index)
if ogstate:
labels &= df['OriginState'] == ogstate
if destate is not None:
labels &= df['DestState'] == destate
if quarter:
labels &= df['Quarter'].isin(quarter)
if airline:
labels &= df['AirlineCompany'].isin(airline)
return labels
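# Each filter only narrows the mask when it is set, so passing None (or an empty
# list) for a criterion keeps all rows for that criterion.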
#-------------------- Price Heat Map-------------------------------------------
def load_data(url):
file = url
df = pd.read_csv(file)
return df
def get_season(df, quarter):
sub = df[df['Quarter']== quarter]
return sub
menu_selection = st.sidebar.radio("Menu", ["Introduction","Flight Map", "Flight Delay Analysis",
"Flight Price Analysis"])
if menu_selection == 'Introduction':
#col1, col2, col3,col4 = st.columns([0.5,1,2,1])
#col2.image("image/flight-logo.jpg", width=150)
#col3.markdown("<h1 style='text-align: left; color: #072F5F;'>Flight Traffic Brain</h1>",
# unsafe_allow_html=True)
col1, col2, col3 = st.columns([0.5,1,4])
col2.image("image/flight-logo.jpg", width=150)
col3.markdown("<h1 style='text-align: left; color: #072F5F;'>Flight Traffic Brain</h1>",
unsafe_allow_html=True)
text = "<p style='font-size:18px'>Nowadays, air traffic control has become a complicated task as there are\
more and more flights and airlines. There have also been rising cases of flight delays, possibly due to poor\
management and massive volume of traffic. While air traffic is important to manage from the perspective of\
airports and airlines, flight prices are crucial for customers who usually make decisions of their travel\
plans based on them. In this project we hope to help airports better manage airlines and control airline\
traffic and passengers make wiser decisions about airline flights.</p>"
st.write(text, unsafe_allow_html=True)
text = "<p style='font-size:18px'>A <span style='color: #1167b1'> real-time map of flights </span> with interactive information such as speed and altitude can help the specialists\
to make better decisions. Meanwhile, an <span style='color: #1167b1'> interactive network graph </span> that shows the connections between airports and\
flights can also improve the handling of dependencies among the traffic. A <span style='color: #1167b1'> data visualization section of delay time </span>\
can also enable users to analyze different flights in real time and in more detail. By filtering the flights according to their\
departure airport, the users can not only view the delay time of different flights, but also have a high-level overview of\
the delay information of flights of different airlines. This information will help airport specialists to better communicate\
with the airports and passengers, and make better decisions in terms of resource distribution. In addition, a <span style='color: #1167b1'> \
machine learning model </span> using historical data to <span style='color: #1167b1'> predict flight price </span> can help passengers\
estimate the potential fare of flight of their interest. An <span style='color: #1167b1'> interactive platform with visualizations of airline comparisons </span> can also allow\
them to compare different flight prices by modifying parameters of interest, thus helping optimize their travel plan.</p>"
st.write(text, unsafe_allow_html=True)
text = "<br><br><br>This project was created by [<NAME>](<EMAIL>), [<NAME>](<EMAIL>), \
[<NAME>](<EMAIL>) and [<NAME>](<EMAIL>) for the [Interactive Data Science](https://dig.cmu.edu/ids2022) course at\
[Carnegie Mellon University](https://www.cmu.edu)"
st.write(text, unsafe_allow_html=True)
elif menu_selection == "Flight Map":
st.title("Real-time Flight Data Visualization")
# ------------ Map starts ---------------------
with st.sidebar.expander("Analysis for flights/airports"):
st.write("This is an analysis tool from the perspective of flights or airports")
to_show = st.selectbox("Data to look at", ["flight", "airport"])
if to_show == "flight":
field = st.selectbox("Variable of interest", ["heading", "altitude", "ground_speed"])
else:
field = st.selectbox("Variable of interest", ["origin_airport_iata", "destination_airport_iata"])
st.write("This is a map of real-time flights and airports. The blue circles are \
the airport, while the red squares are the flights. You can utilize \
the tool bar on the left tab to explore the data. You can also \
move your mouse over the map to see more information.")
map_air = create_map(flight_df, field, to_show)
st.altair_chart(map_air,use_container_width=True)
st.sidebar.title("Note")
st.sidebar.write("This visualization consists of three components.\
The first component is a map that shows real-time flights and airports\
in the U.S. The second component, linked to the first component, \
is an analysis tool for the real-time flight and airport data. \
The third component displays the time information of a flight.")
st.sidebar.download_button("Download real-time data", data=save_AD_data(flight_df),
file_name='airdata.csv', mime='text/csv')
# ------------ Map ends ---------------------
# ------------ Flight time starts ---------------------
st.write("Here we display the time information of a flight.")
option = st.selectbox("Which flight number are you looking into?",
flight_df['number'].sort_values())
# Get the corresponding flight row in the dataframe
option_row = flight_df[flight_df['number'] == option]
option_id = option_row.id.values[0]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
    # Display scheduled and actual times for departure and arrival using metrics
col1, col2 = st.columns(2)
col1.metric("Scheduled departure time",
parse_time(option_time['scheduled']['departure']))
if option_time['real']['departure'] and option_time['scheduled']['departure']:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
else:
depart_delta = None
col2.metric("Actual departure time",
parse_time(option_time['real']['departure']),
parse_time_hms(depart_delta),
delta_color='inverse')
col3, col4 = st.columns(2)
col3.metric("Scheduled arrival time", parse_time(option_time['scheduled']['arrival']))
arrival_time = option_time['real']['arrival']
if not arrival_time:
arrival_time = option_time['estimated']['arrival']
col4.metric("Estimated/Actual arrival time", parse_time(arrival_time))
# Note that some flights are not displayed due to... so the number of routes
# may appear larger than...
# ------------ Flight time ends ---------------------
elif menu_selection == "Flight Delay Analysis":
# ------------ Delay Analysis starts ---------------------
st.title("Flight Delay Analysis")
st.sidebar.title("Note")
st.sidebar.write("This flight delay analysis consists of four parts: \
The first part is a data slicing tool that allows the users to filter any flight data according to the different departure airport.\
The second part lists out all the flights flying from the selected departure airport, and displays the relevant delay time information of the flights. \
The third part displays a stripplot graph to allow the users to visually compare the different departure delay time of flights of different airlines.\
The last part compares the average delay time of different airlines. ")
ad = AirData()
flight_df = ad.get_flights_df()
st.header("Slice Data")
st.write("You can filter the airline data by choosing the different departure airport.")
with st.expander("Airports"):
origin_airport_list = flight_df['origin_airport_iata'].drop_duplicates()
option1 = st.selectbox("Departure Airport:",
(origin_airport_list))
flight_df_selected1 = flight_df[(flight_df['origin_airport_iata'] == option1)]
st.header("Data Visualization")
with st.expander("Flight delay from different departure airports"):
st.write("This data indicates all the current flights coming from the departure airport and their related delay times.")
index = 0
for row in flight_df_selected1.iterrows():
flight_number = flight_df_selected1['number'].values[index]
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
            if option_time['real']['departure'] is None:
                # advance the positional counter before skipping so later rows stay aligned
                index = index + 1
                continue
elif option_time['real']['arrival'] is None:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
arrive_delta = None
col1, col2, col3 = st.columns(3)
col1.metric("Flight number",
flight_number)
col2.metric("Departure delay",
parse_time_hms(depart_delta))
col3.metric("Arrival delay",
arrive_delta)
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
arrive_delta = option_time['real']['arrival'] - option_time['scheduled']['arrival']
col1, col2, col3 = st.columns(3)
col1.metric("Flight number",
flight_number)
col2.metric("Departure delay",
parse_time_hms(depart_delta))
col3.metric("Arrival delay",
parse_time_hms(arrive_delta))
index = index + 1
with st.expander("Flight delay of different airlines"):
st.write("This data compares the punctuality and departure delay times between different airlines.")
depart_delay = []
index = 0
for row in flight_df_selected1.iterrows():
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
            if option_time['real']['departure'] is None:
                # record a missing value and advance the counter so the list stays
                # aligned with the dataframe rows it is assigned to below
                depart_delay.append(None)
                index = index + 1
                continue
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
depart_delta = parse_time_hms(depart_delta)
depart_delay.append(depart_delta)
index = index + 1
flight_df_selected1['depart_delay'] = depart_delay
stripplot = alt.Chart(flight_df_selected1, width=640).mark_circle(size=30).encode(
x=alt.X(
'depart_delay',
title='Departure delay',
scale=alt.Scale()),
y=alt.Y(
'airline_iata',
title='Airline iata'),
color=alt.Color('airline_iata', legend=alt.Legend(orient="right")),
tooltip=['number', 'airline_iata', 'depart_delay']
).transform_calculate(
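            # Box-Muller transform: turns two uniform randoms into a standard-normal value;
            # note the computed 'jitter' field only affects the plot if it is mapped to an encoding.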
jitter='sqrt(-2*log(random()))*cos(2*PI*random())'
).configure_facet(
spacing=0
).configure_view(
stroke=None
)
stripplot
with st.expander("Compare average departure delay of different airlines"):
depart_delay = []
index = 0
for row in flight_df_selected1.iterrows():
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
            if option_time['real']['departure'] is None:
                # use NaN so the per-airline mean below simply skips flights without a real departure
                depart_delay.append(np.nan)
                index = index + 1
                continue
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
# depart_delta = parse_time_hms(depart_delta)
depart_delay.append(depart_delta)
index = index + 1
flight_df_selected1['depart_delay'] = depart_delay
average_delay = []
airline_average_delay_parsed = []
index = 0
for row in flight_df_selected1.iterrows():
ite_airline = flight_df_selected1['airline_iata'].values[index]
airline_data = flight_df_selected1[flight_df_selected1['airline_iata'] == ite_airline]
airline_average_delay = airline_data['depart_delay'].mean()
average_delay_parsed = parse_time_hms(airline_average_delay)
            average_delay_parsed = str(average_delay_parsed)
            if average_delay_parsed.endswith(':00'):
                average_delay_parsed = average_delay_parsed[:-3]  # drop the trailing seconds only
airline_average_delay = round(airline_average_delay, 2)
# airline_average_delay = parse_time_hms(airline_average_delay)
average_delay.append(airline_average_delay)
airline_average_delay_parsed.append(average_delay_parsed)
index = index + 1
flight_df_selected1['airline_average_delay'] = average_delay
flight_df_selected1['average_delay_parsed'] = airline_average_delay_parsed
flight_df_selected2 = flight_df_selected1.drop_duplicates(subset=['airline_iata'], keep='first')
flight_df_selected2 = flight_df_selected2.sort_values(by=['airline_average_delay'], ascending=False)
barchart = alt.Chart(flight_df_selected2, width=640).mark_bar().encode(
x=alt.X('airline_average_delay', axis=alt.Axis(labels=False)),
y=alt.Y('airline_iata', sort=alt.EncodingSortField(field="airline_average_delay", op="count", order='ascending')),
tooltip=['airline_iata', 'average_delay_parsed']
)
text = barchart.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text='average_delay_parsed'
)
        # assign the configured chart so the height setting is actually used when displaying it
        delay_chart = (barchart + text).properties(height=900)
        delay_chart
index = 0
for row in flight_df_selected2.iterrows():
ite_airline = flight_df_selected2['airline_iata'].values[index]
ite_delay = flight_df_selected2['average_delay_parsed'].values[index]
# ite_delay = parse_time_hms(ite_delay)
            ite_delay = str(ite_delay)  # already trimmed to hours:minutes when the column was built above
col1, col2 = st.columns(2)
col1.metric("Airline",
ite_airline)
col2.metric("Average departure delay",
ite_delay)
index = index + 1
# ------------ Delay Analysis ends ---------------------
else:
# ------------------------ Flight price prediction starts ------------------------------
## Price Prediction
st.title("Flight Price Analysis")
# 1. ML prediction
st.header("Flight Price Prediction")
st.write("Tell us your intended flight information and get predicted flight price value and range.")
X_train=pd.read_csv('flight-price/X_train.csv')
features = list(X_train.columns)
del X_train
df_pred = pd.DataFrame(0, index=np.arange(1), columns=features)
col1, col2 = st.columns([3, 2])
with col2:
og = st.selectbox('Origin', np.array(origin),index=30)
de = st.selectbox('Destination', np.array(dest),index=4)
season = st.selectbox('Season', ['Spring','Summer','Fall','Winter'])
airline = st.selectbox('Airline Company', np.array(air))
numT = st.slider('Number of tickets', 1, 15, 1)
if og != "Virgin Islands":
df_pred[f'o{og}'] = 1
else:
df_pred['oU.S. Virgin Islands']=1
if de != "Virgin Islands":
df_pred[f'd{de}'] = 1
else:
df_pred['dU.S. Virgin Islands']=1
if season!='Spring':
df_pred[quarter_dic[season]] = 1
if airline[-3:-1]!='AA':
df_pred[airline[-3:-1]] = 1
df_pred['NumTicketsOrdered'] = numT
if og!=de:
try:
miles = miles_dic[(og,de)]
        except KeyError:
            miles = miles_dic[(de, og)]
df_pred['log_miles']=np.log(miles)
else:
st.markdown(" ")
if og!=de:
low, mean, high = get_pi(pd.DataFrame(df_pred))
with col1:
st.subheader("Predicted Price per Ticket")
st.metric("Low", f'${low}',"+$",delta_color="inverse")
st.metric("Mean", f'${mean}')
st.metric("High", f'${high}',"-$",delta_color="inverse")
            df_interval = pd.DataFrame([[low, mean, high]], columns=['Low', 'Mean', 'High'])  # api: pandas.DataFrame
import os, random, time
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, roc_auc_score, classification_report
# Auxiliary functions
def color_map(val):
if type(val) == float:
if val <= 0.2:
color = 'red'
elif val <= 0.3:
color = 'orange'
elif val >= 0.8:
color = 'green'
else:
color = 'black'
else:
color = 'black'
return 'color: %s' % color
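# color_map is presumably meant for a pandas Styler, e.g. metrics_df.style.applymap(color_map);
# that call is an assumption here and is not shown in this file.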
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
def set_up_strategy():
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
return strategy, tpu
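# Typical (illustrative) use of the returned strategy -- the model factory is assumed,
# not shown in this file:
#   strategy, tpu = set_up_strategy()
#   with strategy.scope():
#       model = build_model()  # hypothetical model-building helper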
# Model evaluation
def evaluate_model(k_fold, n_folds=1, label_col='toxic'):
metrics_df = pd.DataFrame([], columns=['Metric', 'Train', 'Valid', 'Var'])
metrics_df['Metric'] = ['ROC AUC', 'Accuracy', 'Precision', 'Recall', 'F1-score', 'Support']
for n_fold in range(n_folds):
rows = []
train_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'train']
validation_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'validation']
train_report = classification_report(train_set[label_col], train_set['pred_%d' % (n_fold+1)], output_dict=True)
valid_report = classification_report(validation_set[label_col], validation_set['pred_%d' % (n_fold+1)], output_dict=True)
rows.append([roc_auc_score(train_set[label_col], train_set['pred_%d' % (n_fold+1)]),
roc_auc_score(validation_set[label_col], validation_set['pred_%d' % (n_fold+1)])])
rows.append([train_report['accuracy'], valid_report['accuracy']])
rows.append([train_report['1']['precision'], valid_report['1']['precision']])
rows.append([train_report['1']['recall'], valid_report['1']['recall']])
rows.append([train_report['1']['f1-score'], valid_report['1']['f1-score']])
rows.append([train_report['1']['support'], valid_report['1']['support']])
metrics_df = pd.concat([metrics_df, pd.DataFrame(rows, columns=['Train_fold_%d' % (n_fold+1),
'Valid_fold_%d' % (n_fold+1)])], axis=1)
metrics_df['Train'] = metrics_df[[c for c in metrics_df.columns if c.startswith('Train_fold')]].mean(axis=1)
metrics_df['Valid'] = metrics_df[[c for c in metrics_df.columns if c.startswith('Valid_fold')]].mean(axis=1)
metrics_df['Var'] = metrics_df['Train'] - metrics_df['Valid']
return metrics_df.set_index('Metric')
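# Illustrative toy input for evaluate_model (assumed column layout: 'fold_<n>' marks the
# train/validation split and 'pred_<n>' holds binary predictions for that fold):
#   k_fold = pd.DataFrame({
#       'toxic':  [0, 1, 0, 1, 0, 1, 0, 1],
#       'fold_1': ['train'] * 4 + ['validation'] * 4,
#       'pred_1': [0, 1, 0, 1, 0, 1, 1, 0],
#   })
#   evaluate_model(k_fold, n_folds=1, label_col='toxic')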
def evaluate_model_single_fold(k_fold, n_fold=1, label_col='toxic'):
    metrics_df = pd.DataFrame([], columns=['Metric', 'Train', 'Valid', 'Var'])  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 21:30:32 2020
@author: <NAME>
"""
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText
import warnings
import itertools
import pandas as pd
plt.style.use('fivethirtyeight')
class ForecastPower:
"""
Univariate time-series forecasting of WT output power
SARIMAX(p,d,q)(P,D,Q)s
p - Auto-Regressive (AR) , P - Seasonal component of p
d - Integrated (I) , D - Seasonal component of d
    q - Moving Average (MA) , Q - Seasonal component of q
s - period
"""
def __init__(self):
# Import data from pickle
try:
            self.data = pd.read_pickle('data_by_mean.pkl')  # api: pandas.read_pickle
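# Illustrative sketch (not part of the original class) of the usual SARIMAX order search that
# the docstring above describes; the function name, the default grid and the AIC criterion
# are assumptions, not taken from the source.
def _sarimax_grid_search_sketch(series, s=12):
    import itertools
    import warnings
    import statsmodels.api as sm
    orders = list(itertools.product(range(2), range(2), range(2)))
    best = None
    for order in orders:
        for seasonal in orders:
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    res = sm.tsa.statespace.SARIMAX(
                        series, order=order, seasonal_order=seasonal + (s,),
                        enforce_stationarity=False, enforce_invertibility=False
                    ).fit(disp=False)
                # keep the fit with the lowest AIC
                if best is None or res.aic < best[0]:
                    best = (res.aic, order, seasonal + (s,))
            except Exception:
                continue
    return best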
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#---------- Here I have taken only the 9 features obtained from my dataset --------------------
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data=pd.read_csv('dataset_final1')
data.drop('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
column_names=list(data.columns)
data['URL_Type_obf_Type'].value_counts()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].value_counts()
#shuffling the dataframe
shuffled_dataset=data.sample(frac=1).reset_index(drop=True)
#dropping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.drop(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replace([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillna(shuffled_dataset.mean(),inplace=True) #handling the na value
#checking if any value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.isnull().any(axis=0)
inf_result=np.isinf(shuffled_dataset.select_dtypes(include=[np.number])).any(axis=0)
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.drop(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=pd.DataFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=pd.concat([shuffled_dataset_scaled,shuffled_y],axis=1)
#dataset_final.drop(['ISIpAddressInDomainName'],inplace=True,axis=1) #dropping this column since it always contain zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlength']
# reduced_features.append('URL_Type_obf_Type')
# reduced_features.append('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the 13 phising features from research paper
# column_names=dataset_final.columns
# phising_columns=['domain_token_count','tld','urlLen','domainlength','domainUrlRatio','NumberofDotsinURL','Query_DigitCount','LongestPathTokenLength','delimeter_Domain','delimeter_path','SymbolCount_Domain','URL_Type_obf_Type']
# dataset_final=dataset_final[phising_columns]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
train_set.sort_index(axis=0,ascending=True,inplace=True)
test_set.sort_index(axis=0,ascending=True,inplace=True)
#splitting further ito train_x,train_y,test_x,test_x ----Multiclass classification-----
train_y=train_set['URL_Type_obf_Type'] #train data for binary classification
# train_y_binary=train_set['category']
train_set.drop(['URL_Type_obf_Type'],axis=1,inplace=True)
train_x=train_set
test_y=test_set['URL_Type_obf_Type']
# test_y_binary=test_set['category'] #test data for binary classsification
test_set.drop(['URL_Type_obf_Type'],axis=1,inplace=True)
test_x=test_set
#Encoding the categorical variables
#for SVM classification
train_y_svm=train_y
test_y_svm=test_y
#for other types of classification
train_y=pd.get_dummies(train_y)
# train_y_binary=pd.get_dummies(train_y_binary)
# train_y_binary=train_y_svm['benign']
test_y = pd.get_dummies(test_y)  # api: pandas.get_dummies
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
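# Lowering compute.isin_limit below the number of labels presumably switches drop()
# from the Column.isin path to the join-based path, so both code paths get exercised.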
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
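# Shared helper exercised by test_dropna_axis_index (axis=0) and test_dropna_axis_column (axis=1).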
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi-index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi-index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
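# approx=True presumably delegates to Spark's approx_count_distinct (HyperLogLog++),
# so the estimate can deviate from the exact count; rsd tightens the relative error.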
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert that axis=1 is not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
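# xs() selects a cross-section of rows matching the given key at the given MultiIndex level(s).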
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
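# _MissingPandasLikeDataFrame holds stub attributes for pandas APIs that are not (yet)
# supported; each stub is expected to raise PandasNotImplementedError when accessed.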
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# pandas appears to mishandle an `np.array` argument here, so compare against the list-based result
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
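# Row order after a distributed merge is not guaranteed, so sort both results by all
# columns and reset the index before comparing.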
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
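# Same comparison helper as in test_merge: sort by all columns and reset the index before asserting.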
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]])
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
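# Operations that combine two different DataFrames are rejected by default
# (presumably unless the compute.ops_on_diff_frames option is enabled).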
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Just make sure these calls run; the results cannot be asserted because sampling is non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
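# Only fraction-based sampling is supported; sample(n=...) is expected to raise.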
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with a Series whose name duplicates an existing column
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with a DataFrame that has overlapping column names
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# helper that builds matching pandas / pandas-on-Spark frames for the checks below
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# reset_index(drop=True) drops the column labels so that only the dtypes themselves are compared
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
# The results do not carry the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
        # Check that both DataFrames produce the same results (almost=True tolerates
        # small floating-point differences)
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
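        # stack() pivots the innermost column level into the index; results are
        # sorted before comparison because pandas-on-Spark does not guarantee
        # row order.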
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
        # TODO: what about a random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
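    # Shared helpers: each _test_cum* routine checks the cumulative operation,
    # its skipna=False variant, and a follow-up aggregation of the result
    # against plain pandas.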
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
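        # reindex conforms the frame to the given row/column labels, filling
        # labels missing from the original with NaN unless fill_value is given.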
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
        # Use a float fill_value to avoid an int64/int32 dtype clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
# Reindexing single Index on single Index
data2 = [[5.0, None], [6.0, 7.0], [8.0, None]]
index2 = pd.Index(["A", "C", "D"], name="index2")
columns2 = pd.Index(["numbers", "F"], name="cols2")
pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]})
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(),
psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"]
)
pdf2 = pd.DataFrame(data=data2, index=index2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf))
# Reindexing MultiIndex on MultiIndex
columns2 = pd.MultiIndex.from_tuples(
[("numbers", "third"), ("values", "second")], names=["cols3", "cols4"]
)
pdf2.columns = columns2
psdf2.columns = columns2
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"]
)
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
def test_melt(self):
pdf = pd.DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars="A").sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["C"])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname")
.sort_values(["myVarname", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname"
).sort_values(["myVarname", "myValname"]),
)
self.assert_eq(
psdf.melt(value_vars=("A", "B"))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z"))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z"))
# multi-index columns
TEN = 10.0
TWELVE = 20.0
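        # (Note: TWELVE is bound to 20.0; the constants are only used as float
        # labels for the first level of the MultiIndex columns below.)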
columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWELVE, "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable_0", "variable_1", "value"]),
)
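        # pandas-on-Spark flattens tuple column labels into plain strings, so
        # the pandas results below are renamed via name_like_string before
        # being compared.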
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.rename(columns=name_like_string),
)
columns.names = ["v0", "v1"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["v0", "v1", "value"]),
)
self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A")))
self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A")))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN]))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWELVE, "A")]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWELVE]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWELVE, "A")]))
# non-string names
pdf.columns = [10.0, 20.0, 30.0]
psdf.columns = [10.0, 20.0, 30.0]
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=10.0).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0, 20.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0], value_vars=[30.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(value_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
def test_all(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.all(), pdf.all())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.all(axis=1)
def test_any(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.any(), pdf.any())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.any(axis=1)
def test_rank(self):
pdf = pd.DataFrame(
data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]},
columns=["col1", "col2"],
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
self.assert_eq(pdf.rank().sum(), psdf.rank().sum())
self.assert_eq(
pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index()
)
self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index())
self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index())
self.assert_eq(
pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index()
)
self.assert_eq(
pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index()
)
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psdf.rank(method="nothing")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
def test_round(self):
pdf = pd.DataFrame(
{
"A": [0.028208, 0.038683, 0.877076],
"B": [0.992815, 0.645646, 0.149370],
"C": [0.173891, 0.577595, 0.491027],
},
columns=["A", "B", "C"],
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 0, 2], index=["A", "B", "C"])
psser = ps.Series([1, 0, 2], index=["A", "B", "C"])
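        # round() accepts an int, a dict keyed by column label, or a Series for
        # decimals; keys that do not match a column are ignored, as in pandas.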
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2}))
self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
msg = "decimals must be an integer, a dict-like or a Series"
with self.assertRaisesRegex(TypeError, msg):
psdf.round(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
pser = pd.Series([1, 0, 2], index=columns)
psser = ps.Series([1, 0, 2], index=columns)
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(
pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2})
)
self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
# non-string names
pdf = pd.DataFrame(
{
10: [0.028208, 0.038683, 0.877076],
20: [0.992815, 0.645646, 0.149370],
30: [0.173891, 0.577595, 0.491027],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2}))
def test_shift(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())
# Need the expected result since pandas 0.23 does not support `fill_value` argument.
pdf1 = pd.DataFrame(
{"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
index=pdf.index,
)
self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.shift(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
def test_diff(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.diff(), psdf.diff())
self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.diff(1.5)
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.diff(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.diff(), psdf.diff())
def test_duplicated(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
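        # duplicated() flags repeated rows; keep controls which occurrence is
        # left unflagged ("first", "last", or False to flag them all).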
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(keep="last").sort_index(),
psdf.duplicated(keep="last").sort_index(),
)
self.assert_eq(
pdf.duplicated(keep=False).sort_index(),
psdf.duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset="b").sort_index(),
psdf.duplicated(subset="b").sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=["b"]).sort_index(),
psdf.duplicated(subset=["b"]).sort_index(),
)
with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
psdf.duplicated(keep="false")
with self.assertRaisesRegex(KeyError, "'d'"):
psdf.duplicated(subset=["d"])
pdf.index.name = "x"
psdf.index.name = "x"
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
# multi-index
self.assert_eq(
pdf.set_index("a", append=True).duplicated().sort_index(),
psdf.set_index("a", append=True).duplicated().sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
)
        # multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=("x", "b")).sort_index(),
psdf.duplicated(subset=("x", "b")).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=[("x", "b")]).sort_index(),
psdf.duplicated(subset=[("x", "b")]).sort_index(),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=10).sort_index(),
psdf.duplicated(subset=10).sort_index(),
)
def test_ffill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
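        # ffill propagates the last valid observation forward; limit caps how
        # many consecutive NaNs are filled per column.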
self.assert_eq(psdf.ffill(), pdf.ffill())
self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1))
pser = pdf.y
psser = psdf.y
psdf.ffill(inplace=True)
pdf.ffill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[2]], pser[idx[2]])
def test_bfill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.bfill(), pdf.bfill())
self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1))
pser = pdf.x
psser = psdf.x
psdf.bfill(inplace=True)
pdf.bfill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[0]], pser[idx[0]])
def test_filter(self):
pdf = pd.DataFrame(
{
"aa": ["aa", "bd", "bc", "ab", "ce"],
"ba": [1, 2, 3, 4, 5],
"cb": [1.0, 2.0, 3.0, 4.0, 5.0],
"db": [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index("aa")
psdf = ps.from_pandas(pdf)
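        # filter() is exercised with items/like/regex on both axes; the
        # option_context run re-checks the items filter with compute.isin_limit
        # set to 0 to force the alternative lookup path.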
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
with option_context("compute.isin_limit", 0):
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=["ba", "db"], axis=1).sort_index(),
pdf.filter(items=["ba", "db"], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
pdf = pdf.set_index("ba", append=True)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0)
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psdf.filter(items=[(), ("bd", 2)], axis=0)
self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0))
self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0))
with self.assertRaisesRegex(ValueError, "items should be a list-like object"):
psdf.filter(items="b")
with self.assertRaisesRegex(ValueError, "No axis named"):
psdf.filter(regex="b.*", axis=123)
with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"):
psdf.filter()
with self.assertRaisesRegex(TypeError, "mutually exclusive"):
psdf.filter(regex="b.*", like="aaa")
# multi-index columns
pdf = pd.DataFrame(
{
("x", "aa"): ["aa", "ab", "bc", "bd", "ce"],
("x", "ba"): [1, 2, 3, 4, 5],
("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0],
("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index(("x", "aa"))
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
def test_pipe(self):
psdf = ps.DataFrame(
{"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]},
columns=["category", "col1", "col2"],
)
self.assertRaisesRegex(
ValueError,
"arg is both the pipe target and a keyword argument",
lambda: psdf.pipe((lambda x: x, "arg"), arg="1"),
)
def test_transform(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=2).sort_index(),
pdf.transform(lambda x, y: x + y, y=2).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=1).sort_index(),
pdf.transform(lambda x, y: x + y, y=1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.transform(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
def test_apply(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.apply(1)
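        # The return-type annotation must agree with the axis: a DataFrame hint
        # is only accepted for axis=1, a Series hint only for axis=0.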
with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"):
def f1(_) -> ps.DataFrame[int]:
pass
psdf.apply(f1, axis=0)
with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"):
def f2(_) -> ps.Series[int]:
pass
psdf.apply(f2, axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
def test_apply_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.apply(identify1, axis=1)
expected = pdf.apply(identify1, axis=1)
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.apply(identify2, axis=1)
expected = pdf.apply(identify2, axis=1)
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_apply_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
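        # apply_batch applies the function to each internal pandas DataFrame
        # batch; extra positional and keyword arguments are forwarded to it.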
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(),
(pdf + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.apply_batch(1)
with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"):
def f2(_) -> ps.Series[int]:
pass
psdf.pandas_on_spark.apply_batch(f2)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.apply_batch(lambda pdf: 1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_apply_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.apply_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.apply_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
def identify3(x) -> ps.DataFrame[float, [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify3)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
# For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+
if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"):
import numpy.typing as ntp
psdf = ps.from_pandas(pdf)
def identify4(
x,
) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]: # type: ignore[name-defined]
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
arrays = [[1, 2, 3, 4, 5, 6, 7, 8, 9], ["a", "b", "c", "d", "e", "f", "g", "h", "i"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=idx,
)
psdf = ps.from_pandas(pdf)
def identify4(x) -> ps.DataFrame[[int, str], [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.index.names = ["number", "color"]
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
def identify5(
x,
) -> ps.DataFrame[
[("number", int), ("color", str)], [("a", int), ("b", List[int])] # noqa: F405
]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify5)
self.assert_eq(actual, pdf)
def test_transform_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
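        # transform_batch works like apply_batch but must preserve the length of
        # each batch; aggregated results are rejected (see the ValueError checks
        # below).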
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.c + 1).sort_index(),
(pdf.c + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b + 1).sort_index(),
(pdf.b + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.transform_batch(1)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.transform_batch(lambda pdf: 1)
with self.assertRaisesRegex(
ValueError, "transform_batch cannot produce aggregated results"
):
psdf.pandas_on_spark.transform_batch(lambda pdf: pd.Series(1))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_transform_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.transform_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.transform_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_transform_batch_same_anchor(self):
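        # The result of transform_batch is expected to keep the same anchor as
        # the input frame, so it can be assigned back as a new column directly.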
psdf = ps.range(10)
psdf["d"] = psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.id + 1)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(pdf) -> ps.Series[np.int64]:
return pdf.id + 1
psdf["d"] = psdf.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(ser) -> ps.Series[np.int64]:
return ser + 1
psdf["d"] = psdf.id.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
def test_empty_timestamp(self):
pdf = pd.DataFrame(
{
"t": [
datetime(2019, 1, 1, 0, 0, 0),
datetime(2019, 1, 2, 0, 0, 0),
datetime(2019, 1, 3, 0, 0, 0),
]
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
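        # The self-inequality filter yields an empty frame; both the empty
        # result and its timestamp dtype should match pandas.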
self.assert_eq(psdf[psdf["t"] != psdf["t"]], pdf[pdf["t"] != pdf["t"]])
self.assert_eq(psdf[psdf["t"] != psdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes)
def test_to_spark(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"):
psdf.to_spark(index_col="a")
with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"):
psdf.to_spark(index_col=["x", "y", "z"])
def test_keys(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.keys(), pdf.keys())
def test_quantile(self):
pdf, psdf = self.df_pair
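        # quantile in pandas-on-Spark is backed by an approximate percentile
        # computation, hence the accuracy parameter validated below.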
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
self.assert_eq(psdf.loc[[]].quantile(0.5), pdf.loc[[]].quantile(0.5))
self.assert_eq(
psdf.loc[[]].quantile([0.25, 0.5, 0.75]), pdf.loc[[]].quantile([0.25, 0.5, 0.75])
)
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.quantile(0.5, axis=1)
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q=["a"])
with self.assertRaisesRegex(
ValueError, r"percentiles should all be in the interval \[0, 1\]"
):
psdf.quantile(q=[1.1])
self.assert_eq(
psdf.quantile(0.5, numeric_only=False), pdf.quantile(0.5, numeric_only=False)
)
self.assert_eq(
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
pdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
)
# multi-index column
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
pdf = pd.DataFrame({"x": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile(0.5, numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False)
def test_pct_change(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]},
index=np.random.rand(4),
)
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.pct_change(2), pdf.pct_change(2), check_exact=False)
self.assert_eq(psdf.pct_change().sum(), pdf.pct_change().sum(), check_exact=False)
def test_where(self):
pdf, psdf = self.df_pair
# pandas requires `axis` argument when the `other` is Series.
# `axis` is not fully supported yet in pandas-on-Spark.
self.assert_eq(
psdf.where(psdf > 2, psdf.a + 10, axis=0), pdf.where(pdf > 2, pdf.a + 10, axis=0)
)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.where(1)
def test_mask(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.mask(1)
def test_query(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)})
psdf = ps.from_pandas(pdf)
exprs = ("A > B", "A < C", "C == B")
for expr in exprs:
self.assert_eq(psdf.query(expr), pdf.query(expr))
# test `inplace=True`
for expr in exprs:
dummy_psdf = psdf.copy()
dummy_pdf = pdf.copy()
pser = dummy_pdf.A
psser = dummy_psdf.A
dummy_pdf.query(expr, inplace=True)
dummy_psdf.query(expr, inplace=True)
self.assert_eq(dummy_psdf, dummy_pdf)
self.assert_eq(psser, pser)
# invalid values for `expr`
invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]])
for expr in invalid_exprs:
with self.assertRaisesRegex(
TypeError,
"expr must be a string to be evaluated, {} given".format(type(expr).__name__),
):
psdf.query(expr)
# invalid values for `inplace`
invalid_inplaces = (1, 0, "True", "False")
for inplace in invalid_inplaces:
with self.assertRaisesRegex(
TypeError,
'For argument "inplace" expected type bool, received type {}.'.format(
type(inplace).__name__
),
):
psdf.query("a < b", inplace=inplace)
# doesn't support for MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
with self.assertRaisesRegex(TypeError, "Doesn't support for MultiIndex columns"):
psdf.query("('A', 'Z') > ('B', 'X')")
def test_take(self):
pdf = pd.DataFrame(
{"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)}
)
psdf = ps.from_pandas(pdf)
# axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
pdf.columns = columns
# MultiIndex columns with axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psdf.take(1))
self.assertRaises(TypeError, lambda: psdf.take("1"))
self.assertRaises(TypeError, lambda: psdf.take({1, 2}))
self.assertRaises(TypeError, lambda: psdf.take({1: None, 2: None}))
def test_axes(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.axes, psdf.axes)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.axes, psdf.axes)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_eval(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
psdf = ps.from_pandas(pdf)
# operation between columns (returns Series)
self.assert_eq(pdf.eval("A + B"), psdf.eval("A + B"))
self.assert_eq(pdf.eval("A + A"), psdf.eval("A + A"))
# assignment (returns DataFrame)
self.assert_eq(pdf.eval("C = A + B"), psdf.eval("C = A + B"))
self.assert_eq(pdf.eval("A = A + A"), psdf.eval("A = A + A"))
# operation between scalars (returns scalar)
self.assert_eq(pdf.eval("1 + 1"), psdf.eval("1 + 1"))
# complicated operations with assignment
self.assert_eq(
pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
psdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
)
# inplace=True (only support for assignment)
pdf.eval("C = A + B", inplace=True)
psdf.eval("C = A + B", inplace=True)
self.assert_eq(pdf, psdf)
pser = pdf.A
psser = psdf.A
pdf.eval("A = B + C", inplace=True)
psdf.eval("A = B + C", inplace=True)
self.assert_eq(pdf, psdf)
self.assert_eq(pser, psser)
# doesn't support for multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")])
psdf.columns = columns
self.assertRaises(TypeError, lambda: psdf.eval("x.a + y.b"))
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.to_markdown(), psdf.to_markdown())
def test_cache(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
with psdf.spark.cache() as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(
repr(cached_df.spark.storage_level), repr(StorageLevel(True, True, False, True))
)
def test_persist(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
storage_levels = [
StorageLevel.DISK_ONLY,
StorageLevel.MEMORY_AND_DISK,
StorageLevel.MEMORY_ONLY,
StorageLevel.OFF_HEAP,
]
for storage_level in storage_levels:
with psdf.spark.persist(storage_level) as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(repr(cached_df.spark.storage_level), repr(storage_level))
self.assertRaises(TypeError, lambda: psdf.spark.persist("DISK_ONLY"))
def test_squeeze(self):
axises = [None, 0, 1, "rows", "index", "columns"]
# Multiple columns
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Multiple columns with MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value
pdf = pd.DataFrame([[1]], columns=["a"], index=["x"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value with MultiIndex column
columns = pd.MultiIndex.from_tuples([("A", "Z")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values
pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values with MultiIndex column
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
def test_rfloordiv(self):
pdf = pd.DataFrame(
{"angles": [0, 3, 4], "degrees": [360, 180, 360]},
index=["circle", "triangle", "rectangle"],
columns=["angles", "degrees"],
)
psdf = ps.from_pandas(pdf)
expected_result = pdf.rfloordiv(10)
self.assert_eq(psdf.rfloordiv(10), expected_result)
def test_truncate(self):
pdf1 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[1000, 550, 400, 0, -1, -20, -500],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf = ps.DataFrame(
{"A": ["b", "c", "d"], "B": ["i", "j", "k"], "C": ["p", "q", "r"]},
index=[550, 400, 0],
)
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")])
pdf1.columns = columns
psdf1.columns = columns
pdf2.columns = columns
psdf2.columns = columns
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf.columns = columns
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# Exceptions
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, 100, 400, 0, -1, 550, -20],
)
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate()
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
msg = "Truncate: -20 must be after 400"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate(400, -20)
msg = "Truncate: B must be after C"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate("C", "B", axis=1)
def test_explode(self):
pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1})
pdf.index.name = "index"
pdf.columns.name = "columns"
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.name, expected_result1.index.name)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex
midx = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf.index = midx
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.names, expected_result1.index.names)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"])
pdf.columns = columns
psdf.columns = columns
expected_result1 = pdf.explode(("A", "Z"))
expected_result2 = pdf.explode(("B", "X"))
expected_result3 = pdf.A.explode("Z")
self.assert_eq(psdf.explode(("A", "Z")), expected_result1, almost=True)
self.assert_eq(psdf.explode(("B", "X")), expected_result2)
self.assert_eq(psdf.explode(("A", "Z")).index.names, expected_result1.index.names)
self.assert_eq(psdf.explode(("A", "Z")).columns.names, expected_result1.columns.names)
self.assert_eq(psdf.A.explode("Z"), expected_result3, almost=True)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
self.assertRaises(ValueError, lambda: psdf.explode("A"))
def test_spark_schema(self):
psdf = ps.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
columns=["a", "b", "c", "d", "e", "f"],
)
actual = psdf.spark.schema()
expected = (
StructType()
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
actual = psdf.spark.schema("index")
expected = (
StructType()
.add("index", "long", False)
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
def test_print_schema(self):
psdf = ps.DataFrame(
{"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")},
columns=["a", "b", "c"],
)
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
psdf.spark.print_schema()
actual = out.getvalue().strip()
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
out = StringIO()
sys.stdout = out
psdf.spark.print_schema(index_col="index")
actual = out.getvalue().strip()
self.assertTrue("index: long" in actual, actual)
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
finally:
sys.stdout = prev
def test_explain_hint(self):
psdf1 = ps.DataFrame(
{"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]},
columns=["lkey", "value"],
)
psdf2 = ps.DataFrame(
{"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]},
columns=["rkey", "value"],
)
merged = psdf1.merge(psdf2.spark.hint("broadcast"), left_on="lkey", right_on="rkey")
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
merged.spark.explain()
actual = out.getvalue().strip()
self.assertTrue("Broadcast" in actual, actual)
finally:
sys.stdout = prev
def test_mad(self):
pdf = pd.DataFrame(
{
"A": [1, 2, None, 4, np.nan],
"B": [-0.1, 0.2, -0.3, np.nan, 0.5],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
with self.assertRaises(ValueError):
psdf.mad(axis=2)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
pdf = pd.DataFrame({"A": [True, True, False, False], "B": [True, False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
def test_abs(self):
pdf = pd.DataFrame({"a": [-2, -1, 0, 1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(abs(psdf), abs(pdf))
self.assert_eq(np.abs(psdf), np.abs(pdf))
def test_iteritems(self):
pdf = pd.DataFrame(
{"species": ["bear", "bear", "marsupial"], "population": [1864, 22000, 80000]},
index=["panda", "polar", "koala"],
columns=["species", "population"],
)
psdf = ps.from_pandas(pdf)
for (p_name, p_items), (k_name, k_items) in zip(pdf.iteritems(), psdf.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_tail(self):
pdf = pd.DataFrame({"x": range(1000)})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.tail(), psdf.tail())
self.assert_eq(pdf.tail(10), psdf.tail(10))
self.assert_eq(pdf.tail(-990), psdf.tail(-990))
self.assert_eq(pdf.tail(0), psdf.tail(0))
self.assert_eq(pdf.tail(-1001), psdf.tail(-1001))
self.assert_eq(pdf.tail(1001), psdf.tail(1001))
self.assert_eq((pdf + 1).tail(), (psdf + 1).tail())
self.assert_eq((pdf + 1).tail(10), (psdf + 1).tail(10))
self.assert_eq((pdf + 1).tail(-990), (psdf + 1).tail(-990))
self.assert_eq((pdf + 1).tail(0), (psdf + 1).tail(0))
self.assert_eq((pdf + 1).tail(-1001), (psdf + 1).tail(-1001))
self.assert_eq((pdf + 1).tail(1001), (psdf + 1).tail(1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
psdf.tail("10")
def test_last_valid_index(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, None], "b": [1.0, 2.0, 3.0, None], "c": [100, 200, 400, None]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
self.assert_eq(pdf[[]].last_valid_index(), psdf[[]].last_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
def test_last(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last("1D"), psdf.last("1D"))
self.assert_eq(pdf.last(DateOffset(days=1)), psdf.last(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'last' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).last("1D")
def test_first(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first("1D"), psdf.first("1D"))
        self.assert_eq(pdf.first(DateOffset(days=1)), psdf.first(DateOffset(days=1)))
# CacheIntervals: Memoization with interval parameters
#
# Copyright (C) <NAME>
#
# This file is part of CacheIntervals.
#
# @author = '<NAME>'
# @email = '<EMAIL>'
import logging
from functools import reduce
import loguru
import numpy as np
import pandas as pd
import pendulum as pdl
import sqlite3
import time
import klepto
from datetime import date, datetime
from CacheIntervals import MemoizationWithIntervals
from CacheIntervals.utils.Timer import Timer
name_db_file_test1 = "../test/test1.sqlite"
delay = 2
def get_records(conn, name_table, period = pd.Interval(pd.Timestamp(2021, 1,1), pd.Timestamp(2021, 1, 31))):
    time.sleep(delay)  # simulate a slow SQL round-trip so cache hits are measurably faster
query = f"Select * From {name_table} Where date(date) between date('{period.left.date()}') and date('{period.right.date()}')"
#query = f'Select * From {name_table} '
loguru.logger.debug(query)
df = pd.read_sql(query, conn)
return df
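# The decorator instance below makes get_records interval-aware: 'period' is declared as
# the interval argument, results are memoized in a klepto LRU dict archive, and
# aggregation=pd.concat tells the memoizer how to stitch cached sub-interval results back
# together, so a new request should only have to hit the database for the part of the
# period that has not been cached yet.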
cache_itvls = MemoizationWithIntervals(
[], ['period'],
aggregation=pd.concat,
debug=True,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
get_records_cached = cache_itvls(get_records)
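# Illustrative use of the cached variant above (hypothetical names; assumes the sqlite
# fixture created in the __main__ block below is populated):
#   jan = get_records_cached(conn, "test1",
#                            pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 1, 31)))
#   q1 = get_records_cached(conn, "test1",
#                           pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 3, 31)))
# The second call should only need fresh queries for the uncached Feb-Mar portion and
# concatenate them with the cached January slice.
# The variant below additionally sets rounding=pdl.today() - pdl.yesterday() (a one-day
# duration), so interval endpoints within a day of a cached period count as cache hits.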
cache_itvls_concat_with_tolerance = MemoizationWithIntervals(
[], ['period'],
aggregation=pd.concat,
debug=False,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)),
rounding = pdl.today()-pdl.yesterday()
)
get_records_cached_with_tolerance_1day = cache_itvls_concat_with_tolerance(get_records)
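# caching_with_tolerance(): time an uncached January query, warm the cache with the same
# period, then request Jan 1 - Feb 1 (right endpoint one day off) and assert that the
# tolerant cache answers faster than the direct call.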
def caching_with_tolerance():
with Timer() as timer_no_cache:
df_jan = get_records(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 1, 31)))
# activate caching
get_records_cached_with_tolerance_1day(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1),
pd.Timestamp(2021, 1, 31)))
df_jan_cached = None
with Timer() as timer_cache:
df_jan_cached = get_records_cached_with_tolerance_1day(cnx_file, "test1",
pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 2, 1)))
loguru.logger.debug(f'\n{df_jan_cached.sort_values(by="date")}')
assert timer_cache.interval < timer_no_cache.interval
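# The helper below uses the get_function_cachedQ=True flag to retrieve the underlying
# klepto-cached function and report its hit/miss/load counters, which is useful when
# asserting cache behaviour in tests.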
def accesss_cached_function():
get_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 2, 1), pd.Timestamp(2021, 3, 31)))
f_cached = get_records_cached(cnx_file, "test1", get_function_cachedQ=True)
return f_cached.info().hit, f_cached.info().miss, f_cached.info().load
########################################################################################################
#
# Testing caching with aggregation-type operations
#
########################################################################################################
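# agg_cumul recombines the per-interval results returned by the memoizer: it drops
# None/empty frames and adds the rest element-wise (fill_value=0). This is only valid
# because cumulate_records below returns additive per-currency totals for disjoint
# periods.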
def agg_cumul(listdf):
loguru.logger.debug(f'list dfs:{listdf}')
    listdf = [df for df in listdf if df is not None and not df.empty]
if len(listdf):
df = reduce(lambda x, y: x.add(y, fill_value=0), listdf)
else:
raise Exception("Nothing to aggregate")
return df
def cumulate_records(conn, name_table, period=pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 1, 31))):
time.sleep(delay) # simulating a long SQL request
query = f"Select currency, sum(amount_in_eur) " \
f"From {name_table} " \
f"Where date(date) >= date('{period.left.date()}') and date(date) < date('{period.right.date()}')" \
f"Group by currency"
loguru.logger.debug(query)
df = pd.read_sql(query, conn)
df = df.set_index('currency', drop=True)
df.columns = ['total']
df['total'] = df['total'].astype(float)
return df
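# Same interval-aware memoizer, but with the additive agg_cumul as the aggregation and
# subintervals_requiredQ=True forwarded to the RecordInterval constructor -- presumably
# so that requests are split into non-overlapping sub-intervals, since overlapping
# cached pieces would double-count the totals when summed.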
cache_itvls_agg = MemoizationWithIntervals(
[],
['period'],
aggregation=agg_cumul,
debug=True,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)),
subintervals_requiredQ=True # extra-kwarg are passed to RecordInterval constructor
)
cumulate_records_cached = cache_itvls_agg(cumulate_records)
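# caching_aggregation(): compute Jan-Mar totals directly, warm the cache with Jan and
# Feb-Mar separately, then check that the cached Jan-Mar request is faster and that its
# totals agree with the uncached result within the np.isclose tolerance used below.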
def caching_aggregation():
with Timer() as timer_no_cache:
df_janmar = cumulate_records(cnx_file,
"test1",
pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 4, 1)))
# activate caching
df_jan = cumulate_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1),
pd.Timestamp(2021, 2, 1)))
df_febmar = cumulate_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 2, 1),
pd.Timestamp(2021, 4, 1)))
with Timer() as timer_cache:
df_janmar_cached = cumulate_records_cached(cnx_file,
"test1",
pd.Interval(pd.Timestamp(2021, 1, 1),
pd.Timestamp(2021, 4, 1)))
loguru.logger.debug(f'no cache: \n{df_janmar}')
loguru.logger.debug(f'cached: \n{df_janmar_cached}')
loguru.logger.debug(f'jan: \n{df_jan}')
loguru.logger.debug(f'feb-mar:\n{df_febmar}')
df_compare = pd.concat({'nocache': df_janmar, 'cache' : df_janmar_cached}, axis=1)
df_compare = df_compare.assign(zediff = lambda x: x[('cache', 'total')] - x[('nocache', 'total')])
df_compare = df_compare.assign(zediff = lambda x: x.zediff.apply(abs))
loguru.logger.debug(f'diff :\n{df_compare[df_compare.zediff>1]}')
assert np.isclose(df_janmar.total, df_janmar_cached.total, 0.1).all()
assert timer_cache.interval < timer_no_cache.interval
if __name__ == '__main__':
import logging
import daiquiri
daiquiri.setup(level=logging.DEBUG)
name_csv_test1 = "test1.gz"
cnx_file = sqlite3.connect(name_db_file_test1)
if False:
        df = pd.read_sql('Select * from test1', cnx_file)
"""
Tests for the research package's ProCoDA parsing functions
"""
import unittest
import aguaclara.research.procoda_parser as pp
from aguaclara.core.units import u
import pandas as pd
import numpy as np
import os
from matplotlib.testing.compare import compare_images
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
class TestProCoDAParser(unittest.TestCase):
def test_column_of_data(self):
'''''
Extract other columns of data and append units.
'''''
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.column_of_data(path, 50, 1, units='mg/L')
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([ 21.61681747, 21.31163216, 20.80215263, 20.46752739,
20.1048584 , 19.7037487 , 19.4194355 , 18.95934677,
18.65832138, 18.24054337, 17.93864632, 17.591259 ,
17.25979805, 16.98148346, 16.60666656, 16.28514862,
15.99366856, 15.72474861, 15.35812187, 15.11634636,
14.75801468, 14.53341103, 14.20829868, 13.94124603,
13.69845104, 13.42016983, 13.17064667, 12.94155121,
12.66110611, 12.36821651, 12.1641016 , 11.91081715,
11.69137764, 11.46448898, 11.2214098 , 11.03143692,
10.78680801, 10.56936836, 10.36802101, 10.17097855,
9.95537758, 9.78312111, 9.55150509, 9.3843832 ,
9.21883678, 9.03395939, 8.85475636, 8.68857765,
8.47574997, 8.33256149, 8.13628197, 7.96697569,
7.80458403, 7.68562984, 7.4511261 , 7.34629679,
7.17365456, 7.03930044, 6.88661861, 6.73307562,
6.60730886, 6.45987988, 6.30656338, 6.18089199,
6.05378485, 5.90268421, 5.81327915, 5.68042564,
5.57657337, 5.40122986, 5.33153057, 5.19660377,
5.09033108, 4.96228552, 4.85437012, 4.76652002,
4.66415834, 4.54592991, 4.43500376, 4.34614754,
4.24292231, 4.16423607, 4.06328297, 3.96581864,
3.88231015, 3.7828486 , 3.74253488, 3.62953901,
3.53508115, 3.46755266, 3.36818004, 3.30672598,
3.22161722, 3.13899183, 3.08345532, 2.98398542,
2.94956589, 2.8504107 , 2.79215455, 2.72924852,
2.66635823, 2.60831141, 2.53093195, 2.47217631,
2.42190933, 2.36228228, 2.30094266, 2.24602866,
2.19216943, 2.14143515, 2.10641694, 2.07170939,
2.04412961, 2.0158174 , 2.00059986, 1.98546684,
1.97646523, 1.96455812, 1.95887971, 1.94987118])*u('mg/L'), 5).tolist()
)
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.column_of_data(path, 50, "red dye (mg/L)", units='mg/L')
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([ 21.61681747, 21.31163216, 20.80215263, 20.46752739,
20.1048584 , 19.7037487 , 19.4194355 , 18.95934677,
18.65832138, 18.24054337, 17.93864632, 17.591259 ,
17.25979805, 16.98148346, 16.60666656, 16.28514862,
15.99366856, 15.72474861, 15.35812187, 15.11634636,
14.75801468, 14.53341103, 14.20829868, 13.94124603,
13.69845104, 13.42016983, 13.17064667, 12.94155121,
12.66110611, 12.36821651, 12.1641016 , 11.91081715,
11.69137764, 11.46448898, 11.2214098 , 11.03143692,
10.78680801, 10.56936836, 10.36802101, 10.17097855,
9.95537758, 9.78312111, 9.55150509, 9.3843832 ,
9.21883678, 9.03395939, 8.85475636, 8.68857765,
8.47574997, 8.33256149, 8.13628197, 7.96697569,
7.80458403, 7.68562984, 7.4511261 , 7.34629679,
7.17365456, 7.03930044, 6.88661861, 6.73307562,
6.60730886, 6.45987988, 6.30656338, 6.18089199,
6.05378485, 5.90268421, 5.81327915, 5.68042564,
5.57657337, 5.40122986, 5.33153057, 5.19660377,
5.09033108, 4.96228552, 4.85437012, 4.76652002,
4.66415834, 4.54592991, 4.43500376, 4.34614754,
4.24292231, 4.16423607, 4.06328297, 3.96581864,
3.88231015, 3.7828486 , 3.74253488, 3.62953901,
3.53508115, 3.46755266, 3.36818004, 3.30672598,
3.22161722, 3.13899183, 3.08345532, 2.98398542,
2.94956589, 2.8504107 , 2.79215455, 2.72924852,
2.66635823, 2.60831141, 2.53093195, 2.47217631,
2.42190933, 2.36228228, 2.30094266, 2.24602866,
2.19216943, 2.14143515, 2.10641694, 2.07170939,
2.04412961, 2.0158174 , 2.00059986, 1.98546684,
1.97646523, 1.96455812, 1.95887971, 1.94987118])*u('mg/L'), 5).tolist()
)
def test_column_of_time(self):
'''''
Extract the time column from a data file.
'''''
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.column_of_time(path, 50)
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([0.00000000e+00, 5.78662000e-05, 1.15725500e-04,
1.73586900e-04, 2.31470400e-04, 2.89325100e-04,
3.47199600e-04, 4.05070800e-04, 4.62941200e-04,
5.20805100e-04, 5.78682300e-04, 6.36541000e-04,
6.94405500e-04, 7.52295200e-04, 8.10152600e-04,
8.68025100e-04, 9.25879200e-04, 9.83766900e-04,
1.04163170e-03, 1.09949610e-03, 1.15736260e-03,
1.21522990e-03, 1.27310590e-03, 1.33096560e-03,
1.38884810e-03, 1.44671260e-03, 1.50456890e-03,
1.56244910e-03, 1.62031940e-03, 1.67819090e-03,
1.73605480e-03, 1.79390590e-03, 1.85178640e-03,
1.90965780e-03, 1.96752080e-03, 2.02538760e-03,
2.08325540e-03, 2.14113380e-03, 2.19899280e-03,
2.25686180e-03, 2.31473400e-03, 2.37261100e-03,
2.43048170e-03, 2.48834570e-03, 2.54620210e-03,
2.60408890e-03, 2.66194550e-03, 2.71981170e-03,
2.77768240e-03, 2.83556180e-03, 2.89342620e-03,
2.95130110e-03, 3.00916580e-03, 3.06704400e-03,
3.12490300e-03, 3.18278490e-03, 3.24064920e-03,
3.29852180e-03, 3.35638230e-03, 3.41425150e-03,
3.47212870e-03, 3.52999870e-03, 3.58786830e-03,
3.64572740e-03, 3.70359810e-03, 3.76146930e-03,
3.81933520e-03, 3.87721010e-03, 3.93506860e-03,
3.99295440e-03, 4.05082240e-03, 4.10868470e-03,
4.16654890e-03, 4.22442890e-03, 4.28230160e-03,
4.34016650e-03, 4.39804130e-03, 4.45591720e-03,
4.51377060e-03, 4.57164920e-03, 4.62952340e-03,
4.68739510e-03, 4.74524320e-03, 4.80312930e-03,
4.86098350e-03, 4.91887450e-03, 4.97673430e-03,
5.03459310e-03, 5.09248050e-03, 5.15033640e-03,
5.20820950e-03, 5.26607440e-03, 5.32394690e-03,
5.38181660e-03, 5.43967960e-03, 5.49755470e-03,
5.55543130e-03, 5.61330110e-03, 5.67117330e-03,
5.72903190e-03, 5.78690100e-03, 5.84477570e-03,
5.90264880e-03, 5.96051240e-03, 6.01837960e-03,
6.07625150e-03, 6.13413050e-03, 6.19199110e-03,
6.24987260e-03, 6.30772900e-03, 6.36560880e-03,
6.42346920e-03, 6.48135320e-03, 6.53921020e-03,
6.59709090e-03, 6.65494290e-03, 6.71281870e-03,
6.77069570e-03, 6.82855640e-03, 6.88642010e-03])*u.day, 5).tolist()
)
answer = pp.column_of_time(path, 50, end=60, units='hr')
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([0.00000000e+00, 5.78662000e-05, 1.15725500e-04,
1.73586900e-04, 2.31470400e-04, 2.89325100e-04,
3.47199600e-04, 4.05070800e-04, 4.62941200e-04,
5.20805100e-04])*24*u.hr, 5).tolist()
)
def test_notes(self):
'''''
Test function that extracts meta information from data file.
'''''
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.notes(path)['Day fraction since midnight on ']
x = pd.DataFrame(index=[1, 29, 35],
columns=['Day fraction since midnight on ', 'red dye (mg/L)', 'Run Pump ()', 'Pump ()'])
x.iloc[0][0] = 'Start'
x.iloc[1][0] = 'Start'
x.iloc[2][0] = '30 mg/L'
self.assertSequenceEqual(
answer.tolist(),
x['Day fraction since midnight on '].tolist())
def test_remove_notes(self):
'''
Return a DataFrame without any lines that originally contained text
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
output = pp.remove_notes(pd.read_csv(path + '/example datalog.xls', delimiter='\t'))
self.assertSequenceEqual(np.round(pd.to_numeric(output.iloc[:, 0]), 5).tolist(), np.round(np.array(
[0.6842773323, 0.6843351954, 0.6843930789, 0.6844509555, 0.6845088278,
0.6845666989, 0.6846245615, 0.6846824172, 0.6847402968, 0.6847981752,
0.6848560403, 0.6849139126, 0.6849717883, 0.6850296562, 0.6850875147,
0.6851453919, 0.6852032725, 0.6852611229, 0.6853190069, 0.6853768753,
0.6854347496, 0.6854926132, 0.6855504820, 0.6856083520, 0.6856662182,
0.6857240844, 0.6857819618, 0.6858398270, 0.6858977139, 0.6859555700,
0.6860134505, 0.6860713232, 0.6861291842, 0.6861870457, 0.6862449249,
0.6863027915, 0.6863606668, 0.6864185391, 0.6864764071, 0.6865342703,
0.6865921393, 0.6866500041, 0.6867078679, 0.6867657506, 0.6868236041,
0.6868814757, 0.6869393510, 0.6869972210, 0.6870550872, 0.6871129465,
0.6871708079, 0.6872286914, 0.6872865461, 0.6873444206, 0.6874022918,
0.6874601622, 0.6875180261, 0.6875759033, 0.6876337620, 0.6876916265,
0.6877495162, 0.6878073736, 0.6878652461, 0.6879231002, 0.6879809879,
0.6880388527, 0.6880967171, 0.6881545836, 0.6882124509, 0.6882703269,
0.6883281866, 0.6883860691, 0.6884439336, 0.6885017899, 0.6885596701,
0.6886175404, 0.6886754119, 0.6887332758, 0.6887911269, 0.6888490074,
0.6889068788, 0.6889647418, 0.6890226086, 0.6890804764, 0.6891383548,
0.6891962138, 0.6892540828, 0.6893119550, 0.6893698320, 0.6894277027,
0.6894855667, 0.6895434231, 0.6896013099, 0.6896591665, 0.6897170327,
0.6897749034, 0.6898327828, 0.6898906472, 0.6899485221, 0.6900063868,
0.6900642650, 0.6901221240, 0.6901800059, 0.6902378702, 0.6902957428,
0.6903536033, 0.6904114725, 0.6904693497, 0.6905272197, 0.6905850893,
0.6906429484, 0.6907008191, 0.6907586903, 0.6908165562, 0.6908744311,
0.6909322896, 0.6909901754, 0.6910480434, 0.6911059057, 0.6911637699,
0.6912216499, 0.6912795226, 0.6913373875, 0.6913952623, 0.6914531382,
0.6915109916, 0.6915688702, 0.6916267444, 0.6916846161, 0.6917424642,
0.6918003503, 0.6918582045, 0.6919160955, 0.6919739553, 0.6920318141,
0.6920897015, 0.6921475574, 0.6922054305, 0.6922632954, 0.6923211679,
0.6923790376, 0.6924369006, 0.6924947757, 0.6925526523, 0.6926105221,
0.6926683943, 0.6927262529, 0.6927841220, 0.6928419967, 0.6928998698,
0.6929577334, 0.6930156006, 0.6930734725, 0.6931313515, 0.6931892121,
0.6932470936, 0.6933049500, 0.6933628298, 0.6934206902, 0.6934785742,
0.6935364312, 0.6935943119, 0.6936521639, 0.6937100397, 0.6937679167,
0.6938257774, 0.6938836411]), 5).tolist())
def test_get_data_by_time(self):
'''
Extract column(s) of data between given starting and ending days and times
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
data_day1 = pd.read_csv(path + '/datalog_6-14-2018.xls', delimiter='\t')
data_day1 = np.round([pd.to_numeric(data_day1.iloc[:, 0]), pd.to_numeric(data_day1.iloc[:, 4])], 5)
data_day1 = [data_day1[0].tolist(), data_day1[1].tolist()]
data_day2 = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
data_day2 = np.round([pd.to_numeric(data_day2.iloc[:, 0]), pd.to_numeric(data_day2.iloc[:, 4])], 5)
data_day2 = [data_day2[0].tolist(), data_day2[1].tolist()]
data_day2[0][0] = 0 # to remove scientific notation "e-"
# SINGLE COLUMN, ONE DAY
output = pp.get_data_by_time(path=path, columns=0, dates="6-14-2018", start_time="12:20",
end_time="13:00", extension=".xls")
self.assertSequenceEqual(np.round(output, 5).tolist(), data_day1[0][1041:1282])
# SINGLE COLUMN, TWO DAYS
output = pp.get_data_by_time(path=path, columns=0, dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls")
time_column = data_day1[0][1041:] + np.round(np.array(data_day2[0][:3901])+1, 5).tolist()
self.assertSequenceEqual(np.round(output, 5).tolist(), time_column)
# MULTI COLUMN, ONE DAY
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018"], start_time="12:20",
end_time="13:00", extension=".xls")
self.assertSequenceEqual(np.round(output[0], 5).tolist(), data_day1[0][1041:1282])
self.assertSequenceEqual(np.round(output[1], 5).tolist(), data_day1[1][1041:1282])
# MULTI COLUMN, TWO DAYS
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls")
time_column = data_day1[0][1041:] + np.round(np.array(data_day2[0][:3901])+1, 5).tolist()
self.assertSequenceEqual(np.round(output[0], 5).tolist(), time_column)
self.assertSequenceEqual(np.round(output[1], 5).tolist(), data_day1[1][1041:]+data_day2[1][:3901])
# MULTI COLUMN, TWO DAYS, WITH UNITS
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls", units=['day', 'mg/L'])
time_column = data_day1[0][1041:] + np.round(np.array(data_day2[0][:3901])+1, 5).tolist()
self.assertEqual(output[0].units, u.day)
self.assertSequenceEqual(np.round(output[0].magnitude, 5).tolist(), time_column)
self.assertEqual(output[1].units, u.mg/u.L)
self.assertSequenceEqual(np.round(output[1].magnitude, 5).tolist(), data_day1[1][1041:]+data_day2[1][:3901])
######## WITH ELAPSED TIME ########
start = pp.day_fraction("12:20")
data_day1 = pd.read_csv(path + '/datalog_6-14-2018.xls', delimiter='\t')
data_day1 = [np.round(pd.to_numeric(data_day1.iloc[:, 0]) - start, 5).tolist(),
np.round(pd.to_numeric(data_day1.iloc[:, 4]), 5).tolist()]
data_day2 = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
data_day2.iloc[0,0] = 0 # to remove scientific notation "e-"
data_day2 = [np.round(pd.to_numeric(data_day2.iloc[:, 0]) - start + 1, 5).tolist(),
np.round(pd.to_numeric(data_day2.iloc[:, 4]), 5).tolist()]
# SINGLE COLUMN, ONE DAY
output = pp.get_data_by_time(path=path, columns=0, dates="6-14-2018", start_time="12:20",
end_time="13:00", extension=".xls", elapsed=True)
self.assertSequenceEqual(np.round(output, 5).tolist(), data_day1[0][1041:1282])
# MULTI COLUMN, TWO DAYS
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls",
elapsed=True)
self.assertSequenceEqual(np.round(output[0], 5).tolist(), data_day1[0][1041:]+data_day2[0][:3901])
self.assertSequenceEqual(np.round(output[1], 5).tolist(), data_day1[1][1041:]+data_day2[1][:3901])
def test_day_fraction(self):
'''
Converts time into a fraction of the day
'''
time = pp.day_fraction(time="12:00")
self.assertEqual(time, 0.5)
def test_data_from_dates(self):
'''
Return a list of DataFrames representing the ProCoDA data files stored in the given path and recorded on the given dates.
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
dataFromPath = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
getDataFromDates = pp.data_from_dates(path=path, dates='6-15-2018', extension=".xls")[0]
self.assertTrue(getDataFromDates.equals(dataFromPath))
def test_column_start_to_end(self):
'''
Return entries in column from starting index in first DataFrame to ending index in last DataFrame
'''
#One DataFrame
path = os.path.join(os.path.dirname(__file__), '.', 'data')
data_manual1 = pd.read_csv(path + '/datalog_6-14-2018.xls', delimiter='\t')
getColData1 = pp.column_start_to_end(data=[data_manual1], column=1, start_idx=2, end_idx=7)
compareColData1 = [-4.34825945, -2.3821919, -2.57200098, -2.40549088,
-1.00214481]
self.assertSequenceEqual(getColData1, compareColData1)
#Three DataFrames
data_manual2 = pd.read_csv(path + '/datalog_6-16-2018.xls', delimiter='\t')
data_manual3 = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
getColData2 = pp.column_start_to_end([data_manual1, data_manual2, data_manual3],
column=2, start_idx=5238, end_idx=2)
compareColData2 = [24.26625443, 24.2669487, 24.26613235, 24.26708603,
24.26683617, 24.26708603, 24.26683617]
self.assertSequenceEqual(getColData2, compareColData2)
def test_get_data_by_state(self):
'''
Extract the time column and a data column for each iteration of a state
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
# Local path
output = pp.get_data_by_state(path, dates="6-19-2013", state=1, column=1, extension=".xls") # , "6-20-2013"
datafile = pd.read_csv(path + "/datalog_6-19-2013.xls", delimiter='\t')
time_and_data1 = np.array([pd.to_numeric(datafile.iloc[:, 0]),
                                   np.round(pd.to_numeric(datafile.iloc[:, 1]), 5)])
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
                    # Out of max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
                    # exceeding the vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
        # it works even if sys.stdin is None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
"""
Test AR Model
"""
import datetime as dt
from itertools import product
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pandas as pd
from pandas import Index, Series, date_range, period_range
from pandas.testing import assert_series_equal
import pytest
from statsmodels.datasets import macrodata, sunspots
from statsmodels.iolib.summary import Summary
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.sm_exceptions import SpecificationWarning, ValueWarning
from statsmodels.tools.tools import Bunch
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.deterministic import (
DeterministicProcess,
Seasonality,
TimeTrend,
)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.tests.results import results_ar
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
def gen_ar_data(nobs):
rs = np.random.RandomState(982739)
idx = pd.date_range(dt.datetime(1900, 1, 1), freq="M", periods=nobs)
return pd.Series(rs.standard_normal(nobs), index=idx), rs
def gen_ols_regressors(ar, seasonal, trend, exog):
nobs = 500
y, rs = gen_ar_data(nobs)
maxlag = ar if isinstance(ar, int) else max(ar)
reg = []
if "c" in trend:
const = pd.Series(np.ones(nobs), index=y.index, name="const")
reg.append(const)
if "t" in trend:
time = np.arange(1, nobs + 1)
time = pd.Series(time, index=y.index, name="time")
reg.append(time)
if isinstance(ar, int) and ar:
lags = np.arange(1, ar + 1)
elif ar == 0:
lags = None
else:
lags = ar
if seasonal:
seasons = np.zeros((500, 12))
for i in range(12):
seasons[i::12, i] = 1
cols = ["s.{0}".format(i) for i in range(12)]
seasons = pd.DataFrame(seasons, columns=cols, index=y.index)
if "c" in trend:
seasons = seasons.iloc[:, 1:]
reg.append(seasons)
if maxlag:
for lag in lags:
reg.append(y.shift(lag))
if exog:
x = rs.standard_normal((nobs, exog))
cols = ["x.{0}".format(i) for i in range(exog)]
x = pd.DataFrame(x, columns=cols, index=y.index)
reg.append(x)
else:
x = None
reg.insert(0, y)
df = pd.concat(reg, axis=1).dropna()
endog = df.iloc[:, 0]
exog = df.iloc[:, 1:]
return y, x, endog, exog
ar = [0, 3, [1, 3], [3]]
seasonal = [True, False]
trend = ["n", "c", "t", "ct"]
exog = [None, 2]
covs = ["nonrobust", "HC0"]
params = list(product(ar, seasonal, trend, exog, covs))
final = []
for param in params:
if param[0] != 0 or param[1] or param[2] != "n" or param[3]:
final.append(param)
params = final
names = ("AR", "Seasonal", "Trend", "Exog", "Cov Type")
ids = [
", ".join([n + ": " + str(p) for n, p in zip(names, param)])
for param in params
]
@pytest.fixture(scope="module", params=params, ids=ids)
def ols_autoreg_result(request):
ar, seasonal, trend, exog, cov_type = request.param
y, x, endog, exog = gen_ols_regressors(ar, seasonal, trend, exog)
ar_mod = AutoReg(y, ar, seasonal=seasonal, trend=trend, exog=x)
ar_res = ar_mod.fit(cov_type=cov_type)
ols = OLS(endog, exog)
ols_res = ols.fit(cov_type=cov_type, use_t=False)
return ar_res, ols_res
attributes = [
"bse",
"cov_params",
"df_model",
"df_resid",
"fittedvalues",
"llf",
"nobs",
"params",
"resid",
"scale",
"tvalues",
"use_t",
]
def fix_ols_attribute(val, attrib, res):
"""
fixes to correct for df adjustment b/t OLS and AutoReg with nonrobust cov
"""
nparam = res.k_constant + res.df_model
nobs = nparam + res.df_resid
df_correction = (nobs - nparam) / nobs
if attrib in ("scale",):
return val * df_correction
elif attrib == "df_model":
return val + res.k_constant
elif res.cov_type != "nonrobust":
return val
elif attrib in ("bse", "conf_int"):
return val * np.sqrt(df_correction)
elif attrib in ("cov_params", "scale"):
return val * df_correction
elif attrib in ("f_test",):
return val / df_correction
elif attrib in ("tvalues",):
return val / np.sqrt(df_correction)
return val
@pytest.mark.parametrize("attribute", attributes)
def test_equiv_ols_autoreg(ols_autoreg_result, attribute):
a, o = ols_autoreg_result
ols_a = getattr(o, attribute)
ar_a = getattr(a, attribute)
if callable(ols_a):
ols_a = ols_a()
ar_a = ar_a()
ols_a = fix_ols_attribute(ols_a, attribute, o)
assert_allclose(ols_a, ar_a)
def test_conf_int_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
a_ci = a.conf_int()
o_ci = o.conf_int()
if o.cov_type == "nonrobust":
spread = o_ci.T - o.params
spread = fix_ols_attribute(spread, "conf_int", o)
o_ci = (spread + o.params).T
assert_allclose(a_ci, o_ci)
def test_f_test_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
r = np.eye(a.params.shape[0])
a_f = a.f_test(r).fvalue
o_f = o.f_test(r).fvalue
o_f = fix_ols_attribute(o_f, "f_test", o)
assert_allclose(a_f, o_f)
@pytest.mark.smoke
def test_other_tests_autoreg(ols_autoreg_result):
a, _ = ols_autoreg_result
r = np.ones_like(a.params)
a.t_test(r)
r = np.eye(a.params.shape[0])
a.wald_test(r)
# TODO: test likelihood for ARX model?
class CheckARMixin(object):
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)
def test_bse(self):
bse = np.sqrt(np.diag(self.res1.cov_params()))
# no dof correction for compatibility with Stata
assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)
assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)
def test_pickle(self):
from io import BytesIO
fh = BytesIO()
# test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0, 0)
res_unpickled = self.res1.__class__.load(fh)
assert type(res_unpickled) is type(self.res1) # noqa: E721
@pytest.mark.smoke
def test_summary(self):
assert isinstance(self.res1.summary().as_text(), str)
@pytest.mark.smoke
def test_pvalues(self):
assert isinstance(self.res1.pvalues, (np.ndarray, pd.Series))
params = product(
[0, 1, 3, [1, 3]],
["n", "c", "t", "ct"],
[True, False],
[0, 2],
[None, 11],
["none", "drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
def gen_data(nobs, nexog, pandas, seed=92874765):
rs = np.random.RandomState(seed)
endog = rs.standard_normal((nobs))
exog = rs.standard_normal((nobs, nexog)) if nexog else None
if pandas:
index = pd.date_range(
dt.datetime(1999, 12, 31), periods=nobs, freq="M"
)
endog = pd.Series(endog, name="endog", index=index)
if nexog:
cols = ["exog.{0}".format(i) for i in range(exog.shape[1])]
exog = pd.DataFrame(exog, columns=cols, index=index)
from collections import namedtuple
DataSet = namedtuple("DataSet", ["endog", "exog"])
return DataSet(endog=endog, exog=exog)
@pytest.fixture(scope="module", params=params, ids=ids)
def ar_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.fixture(scope="module")
def ar2(request):
gen = np.random.RandomState(20210623)
e = gen.standard_normal(52)
y = 10 * np.ones_like(e)
for i in range(2, y.shape[0]):
y[i] = 1 + 0.5 * y[i - 1] + 0.4 * y[i - 2] + e[i]
index = pd.period_range("2000-01-01", periods=e.shape[0] - 2, freq="M")
return pd.Series(y[2:], index=index)
params = product(
[0, 3, [1, 3]],
["c"],
[True, False],
[0],
[None, 11],
["drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
# Only test 1/3 to save time
@pytest.fixture(scope="module", params=params[::3], ids=ids[::3])
def plot_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.mark.matplotlib
@pytest.mark.smoke
def test_autoreg_smoke_plots(plot_data, close_figures):
from matplotlib.figure import Figure
mod = AutoReg(
plot_data.endog,
plot_data.lags,
trend=plot_data.trend,
seasonal=plot_data.seasonal,
exog=plot_data.exog,
hold_back=plot_data.hold_back,
period=plot_data.period,
missing=plot_data.missing,
)
res = mod.fit()
fig = res.plot_diagnostics()
assert isinstance(fig, Figure)
if plot_data.exog is None:
fig = res.plot_predict(end=300)
assert isinstance(fig, Figure)
fig = res.plot_predict(end=300, alpha=None, in_sample=False)
assert isinstance(fig, Figure)
assert isinstance(res.summary(), Summary)
@pytest.mark.smoke
def test_autoreg_predict_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
exog_oos = None
if ar_data.exog is not None:
exog_oos = np.empty((1, ar_data.exog.shape[1]))
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
if ar_data.lags == 0 and ar_data.exog is None:
mod.predict(res.params, 0, 350, exog_oos=exog_oos)
if isinstance(ar_data.endog, pd.Series) and (
not ar_data.seasonal or ar_data.period is not None
):
ar_data.endog.index = list(range(ar_data.endog.shape[0]))
if ar_data.exog is not None:
ar_data.exog.index = list(range(ar_data.endog.shape[0]))
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
period=ar_data.period,
missing=ar_data.missing,
)
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
@pytest.mark.matplotlib
def test_parameterless_autoreg():
data = gen_data(250, 0, False)
mod = AutoReg(data.endog, 0, trend="n", seasonal=False, exog=None)
res = mod.fit()
for attr in dir(res):
if attr.startswith("_"):
continue
# TODO
if attr in (
"predict",
"f_test",
"t_test",
"initialize",
"load",
"remove_data",
"save",
"t_test",
"t_test_pairwise",
"wald_test",
"wald_test_terms",
):
continue
attr = getattr(res, attr)
if callable(attr):
attr()
else:
assert isinstance(attr, object)
def test_predict_errors():
data = gen_data(250, 2, True)
mod = AutoReg(data.endog, 3)
res = mod.fit()
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog=data.exog)
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog_oos=data.exog)
with pytest.raises(ValueError, match="hold_back must be >= lags"):
AutoReg(data.endog, 3, hold_back=1)
with pytest.raises(ValueError, match="freq cannot be inferred"):
AutoReg(data.endog.values, 3, seasonal=True)
mod = AutoReg(data.endog, 3, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match=r"The shape of exog \(200, 2\)"):
mod.predict(res.params, exog=data.exog.iloc[:200])
with pytest.raises(ValueError, match="The number of columns in exog_oos"):
mod.predict(res.params, exog_oos=data.exog.iloc[:, :1])
with pytest.raises(ValueError, match="Prediction must have `end` after"):
mod.predict(res.params, start=200, end=199)
with pytest.raises(ValueError, match="exog_oos must be provided"):
mod.predict(res.params, end=250, exog_oos=None)
mod = AutoReg(data.endog, 0, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match="start and end indicate that 10"):
mod.predict(res.params, end=259, exog_oos=data.exog.iloc[:5])
def test_spec_errors():
data = gen_data(250, 2, True)
with pytest.raises(ValueError, match="lags must be a non-negative scalar"):
AutoReg(data.endog, -1)
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, 1, 1])
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, -2, 3])
@pytest.mark.smoke
def test_dynamic_forecast_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
res.predict(dynamic=True)
if ar_data.exog is None:
res.predict(end=260, dynamic=True)
@pytest.mark.smoke
def test_ar_select_order_smoke():
data = sunspots.load().data["SUNACTIVITY"]
ar_select_order(data, 4, glob=True, trend="n")
ar_select_order(data, 4, glob=False, trend="n")
ar_select_order(data, 4, seasonal=True, period=12)
ar_select_order(data, 4, seasonal=False)
ar_select_order(data, 4, glob=True)
ar_select_order(data, 4, glob=True, seasonal=True, period=12)
class CheckAutoRegMixin(CheckARMixin):
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse_stata, DECIMAL_6)
class TestAutoRegOLSConstant(CheckAutoRegMixin):
"""
Test AutoReg fit by OLS with a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
data.endog.index = list(range(len(data.endog)))
cls.res1 = AutoReg(data.endog, lags=9).fit()
cls.res2 = results_ar.ARResultsOLS(constant=True)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
class TestAutoRegOLSNoConstant(CheckAutoRegMixin):
"""f
Test AR fit by OLS without a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
cls.res1 = AutoReg(np.asarray(data.endog), lags=9, trend="n").fit()
cls.res2 = results_ar.ARResultsOLS(constant=False)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
@pytest.mark.parametrize("lag", list(np.arange(1, 16 + 1)))
def test_autoreg_info_criterion(lag):
data = sunspots.load()
endog = np.asarray(data.endog)
endog_tmp = endog[16 - lag :]
r = AutoReg(endog_tmp, lags=lag).fit()
# See issue #324 for the corrections vs. R
aic = r.aic
hqic = r.hqic
bic = r.bic
res1 = np.array([aic, hqic, bic, r.fpe])
# aic correction to match R
res2 = results_ar.ARLagResults("const").ic.T
comp = res2[lag - 1, :].copy()
k = 2 + lag
pen = np.array([2, 2 * np.log(np.log(r.nobs)), np.log(r.nobs)])
comp[:3] = -2 * r.llf + pen * k
assert_almost_equal(res1, comp, DECIMAL_6)
r2 = AutoReg(endog, lags=lag, hold_back=16).fit()
assert_allclose(r.aic, r2.aic)
assert_allclose(r.bic, r2.bic)
assert_allclose(r.hqic, r2.hqic)
assert_allclose(r.fpe, r2.fpe)
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_named_series(reset_randomstate, old_names):
warning = FutureWarning if old_names else None
dates = period_range(start="2011-1", periods=72, freq="M")
y = Series(np.random.randn(72), name="foobar", index=dates)
with pytest.warns(warning):
results = AutoReg(y, lags=2, old_names=old_names).fit()
if old_names:
idx = Index(["intercept", "foobar.L1", "foobar.L2"])
else:
idx = Index(["const", "foobar.L1", "foobar.L2"])
assert results.params.index.equals(idx)
@pytest.mark.smoke
def test_autoreg_series():
# GH#773
dta = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(dta), freq="Q")
dta.index = dates
ar = AutoReg(dta, lags=15).fit()
ar.bse
def test_ar_order_select():
# GH#2118
np.random.seed(12345)
y = arma_generate_sample([1, -0.75, 0.3], [1], 100)
ts = Series(
y,
index=date_range(start=dt.datetime(1990, 1, 1), periods=100, freq="M"),
)
res = ar_select_order(ts, maxlag=12, ic="aic")
assert tuple(res.ar_lags) == (1, 2)
assert isinstance(res.aic, dict)
assert isinstance(res.bic, dict)
assert isinstance(res.hqic, dict)
assert isinstance(res.model, AutoReg)
assert not res.seasonal
assert res.trend == "c"
assert res.period is None
def test_autoreg_constant_column_trend():
sample = np.array(
[
0.46341460943222046,
0.46341460943222046,
0.39024388790130615,
0.4146341383457184,
0.4146341383457184,
0.4146341383457184,
0.3414634168148041,
0.4390243887901306,
0.46341460943222046,
0.4390243887901306,
]
)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7, trend="n")
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_summary_corner(old_names):
data = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(data), freq="Q")
data.index = dates
warning = FutureWarning if old_names else None
with pytest.warns(warning):
res = AutoReg(data, lags=4, old_names=old_names).fit()
summ = res.summary().as_text()
assert "AutoReg(4)" in summ
assert "cpi.L4" in summ
assert "03-31-1960" in summ
with pytest.warns(warning):
res = AutoReg(data, lags=0, old_names=old_names).fit()
summ = res.summary().as_text()
if old_names:
assert "intercept" in summ
else:
assert "const" in summ
assert "AutoReg(0)" in summ
@pytest.mark.smoke
def test_autoreg_score():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), 3)
res = ar.fit()
score = ar.score(res.params)
assert isinstance(score, np.ndarray)
assert score.shape == (4,)
assert ar.information(res.params).shape == (4, 4)
assert_allclose(-ar.hessian(res.params), ar.information(res.params))
def test_autoreg_roots():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), lags=1)
res = ar.fit()
assert_almost_equal(res.roots, np.array([1.0 / res.params[-1]]))
def test_equiv_dynamic(reset_randomstate):
e = np.random.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
for i in range(1, 1001):
y[i] = 0.9 * y[i - 1] + e[i]
mod = AutoReg(y, 1)
res = mod.fit()
pred0 = res.predict(500, 800, dynamic=0)
pred1 = res.predict(500, 800, dynamic=True)
idx = pd.date_range(dt.datetime(2000, 1, 30), periods=1001, freq="M")
y = pd.Series(y, index=idx)
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import gzip
import os
import re
import shutil
from collections import OrderedDict
from io import BytesIO, StringIO
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf import read_csv
from cudf.tests.utils import assert_eq, assert_exceptions_equal
def make_numeric_dataframe(nrows, dtype):
df = pd.DataFrame()
df["col1"] = np.arange(nrows, dtype=dtype)
df["col2"] = np.arange(1, 1 + nrows, dtype=dtype)
return df
def make_datetime_dataframe(include_non_standard=False):
df = pd.DataFrame()
df["col1"] = np.array(
[
"31/10/2010",
"05/03/2001",
"20/10/1994",
"18/10/1990",
"1/1/1970",
"2016-04-30T01:02:03.000",
"2038-01-19 03:14:07",
]
)
df["col2"] = np.array(
[
"18/04/1995",
"14 / 07 / 1994",
"07/06/2006",
"16/09/2005",
"2/2/1970",
"2007-4-30 1:6:40.000PM",
"2038-01-19 03:14:08",
]
)
if include_non_standard:
# Last column contains non-standard date formats
df["col3"] = np.array(
[
"1 Jan",
"2 January 1994",
"Feb 2002",
"31-01-2000",
"1-1-1996",
"15-May-2009",
"21-Dec-3262",
]
)
return df
def make_numpy_mixed_dataframe():
df = pd.DataFrame()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas_should # noqa
class TestEqualAccessorMixin(object):
def test_equal_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert s1.should.equal(s2)
def test_equal_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert not s1.should.equal(s2)
@pytest.mark.parametrize('alias_name', [
'be_equal_to', 'be_equals_to', 'be_eq_to', 'eq',
])
def test_equal_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_not_equal_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert s1.should.not_equal(s2)
def test_not_equal_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert not s1.should.not_equal(s2)
@pytest.mark.parametrize('alias_name', [
'be_not_equal_to', 'be_not_equals_to', 'be_neq_to', 'neq',
])
def test_not_equal_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_have_same_length_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert s1.should.have_same_length(s2)
def test_have_same_length_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert not s1.should.have_same_length(s2)
def test_have_same_length_multiple(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2])
s3 = pd.Series([3])
assert s1.should.have_same_length(s2, s3)
class TestNullAccessorMixin(object):
def test_have_null_true(self):
s = pd.Series([1, None, 3])
assert s.should.have_null()
def test_have_null_false(self):
s = pd.Series([1, 2, 3])
assert not s.should.have_null()
def test_have_null_count(self):
s = pd.Series([1, None, 3])
assert s.should.have_null(count=True) == (True, 1)
def test_have_not_null_true(self):
s = pd.Series([1, 2, 3])
assert s.should.have_not_null()
def test_have_not_null_false(self):
s = pd.Series([1, None, 3])
assert not s.should.have_not_null()
@pytest.mark.parametrize('alias_name', ['havent_null'])
def test_have_not_null_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
class TestLengthAccessorMixin(object):
@pytest.mark.parametrize('s, length', [
(pd.Series([1, 2, 3]), 3),
(pd.Series([1, 2]), 2),
])
def test_have_length(self, s, length):
assert s.should.have_length(length)
@pytest.mark.parametrize('alias_name', ['length'])
def test_have_length_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
from surf.script_tab import keytab
from surf.surf_tool import regex2pairs
import os, json, time, re, codecs, glob, shutil
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging.handlers
import pandas as pd
import itertools
import numpy as np
import random
import tensorflow as tf
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
# Cross-validation splitter for time-series data
class PurgedGroupTimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
group_gap : int, default=None
Gap between train and test
max_test_group_size : int, default=Inf
Maximum group size for a single test set.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate((train_array,
train_array_tmp)),
axis=None), axis=None)
train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate((test_array,
test_array_tmp)),
axis=None), axis=None)
test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in test_array]
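# Illustrative usage sketch (added for clarity, not part of the original script):
# drives the splitter above on synthetic data. The group ids are assumed to be
# ordered time buckets (e.g. dates or eras); window and gap sizes are arbitrary.
def _example_purged_group_split():
    X = np.random.standard_normal((1000, 5))
    y = np.random.standard_normal(1000)
    groups = np.repeat(np.arange(100), 10)  # 100 ordered groups, 10 rows each
    cv = PurgedGroupTimeSeriesSplit(n_splits=5, group_gap=2, max_train_group_size=60)
    # every test fold lies strictly after its train fold, with a purge gap between them
    return [(len(tr), len(te)) for tr, te in cv.split(X, y, groups=groups)]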
# Cross-validation splitter for time-series data with an extra stacking/test split
class PurgedGroupTimeSeriesSplitStacking(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
stacking_mode : bool, default=True
Whether to provide an additional set to test a stacking classifier or not.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
max_val_group_size : int, default=Inf
Maximum group size for a single validation set.
max_test_group_size : int, default=Inf
Maximum group size for a single test set; if stacking_mode=True and None,
it defaults to max_val_group_size.
val_group_gap : int, default=None
Gap between train and validation.
test_group_gap : int, default=None
Gap between validation and test; if stacking_mode=True and None,
it defaults to val_group_gap.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
stacking_mode=True,
max_train_group_size=np.inf,
max_val_group_size=np.inf,
max_test_group_size=np.inf,
val_group_gap=None,
test_group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.max_val_group_size = max_val_group_size
self.max_test_group_size = max_test_group_size
self.val_group_gap = val_group_gap
self.test_group_gap = test_group_gap
self.verbose = verbose
self.stacking_mode = stacking_mode
def split(self, X, y=None, groups=None):
if self.stacking_mode:
return self.split_ensemble(X, y, groups)
else:
return self.split_standard(X, y, groups)
def split_standard(self, X, y=None, groups=None):
"""Generate indices to split data into training and validation set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/validation set.
Yields
------
train : ndarray
The training set indices for that split.
val : ndarray
The validation set indices for that split.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_splits = self.n_splits
group_gap = self.val_group_gap
max_val_group_size = self.max_val_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds, n_groups))
group_val_size = min(n_groups // n_folds, max_val_group_size)
group_val_starts = range(n_groups - n_splits * group_val_size, n_groups, group_val_size)
for group_val_start in group_val_starts:
train_array = []
val_array = []
group_st = max(0, group_val_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_val_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(np.concatenate((train_array, train_array_tmp)), axis=None), axis=None)
train_end = train_array.size
for val_group_idx in unique_groups[group_val_start: group_val_start + group_val_size]:
val_array_tmp = group_dict[val_group_idx]
val_array = np.sort(np.unique(np.concatenate((val_array, val_array_tmp)), axis=None), axis=None)
val_array = val_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in val_array]
def split_ensemble(self, X, y=None, groups=None):
"""Generate indices to split data into training, validation and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
val : ndarray
The validation set indices for that split (testing indices for base classifiers).
test : ndarray
The testing set indices for that split (testing indices for final classifier)
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_splits = self.n_splits
val_group_gap = self.val_group_gap
test_group_gap = self.test_group_gap
if test_group_gap is None:
test_group_gap = val_group_gap
max_train_group_size = self.max_train_group_size
max_val_group_size = self.max_val_group_size
max_test_group_size = self.max_test_group_size
if max_test_group_size is None:
max_test_group_size = max_val_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds, n_groups))
group_val_size = min(n_groups // n_folds, max_val_group_size)
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size, n_groups, group_test_size)
train_indices = []
val_indices = []
test_indices = []
for group_test_start in group_test_starts:
train_array = []
val_array = []
test_array = []
val_group_st = max(max_train_group_size + val_group_gap,
group_test_start - test_group_gap - max_val_group_size)
train_group_st = max(0, val_group_st - val_group_gap - max_train_group_size)
for train_group_idx in unique_groups[train_group_st:(val_group_st - val_group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(np.concatenate((train_array, train_array_tmp)), axis=None), axis=None)
train_end = train_array.size
for val_group_idx in unique_groups[val_group_st:(group_test_start - test_group_gap)]:
val_array_tmp = group_dict[val_group_idx]
val_array = np.sort(np.unique(np.concatenate((val_array, val_array_tmp)), axis=None), axis=None)
val_array = val_array[val_group_gap:]
for test_group_idx in unique_groups[group_test_start:(group_test_start + group_test_size)]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(np.concatenate((test_array, test_array_tmp)), axis=None), axis=None)
test_array = test_array[test_group_gap:]
yield [int(i) for i in train_array], [int(i) for i in val_array], [int(i) for i in test_array]
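# Illustrative usage sketch (added for clarity, not part of the original script):
# with stacking_mode=True each split yields three index lists -- train (base models),
# validation (out-of-fold stacking features) and test (final evaluation). All sizes
# and gaps below are arbitrary assumptions chosen so the three windows do not overlap.
def _example_stacking_split():
    X = np.random.standard_normal((1200, 4))
    y = np.random.standard_normal(1200)
    groups = np.repeat(np.arange(120), 10)  # ordered time buckets
    cv = PurgedGroupTimeSeriesSplitStacking(
        n_splits=3, max_train_group_size=30, max_val_group_size=20,
        max_test_group_size=20, val_group_gap=2, test_group_gap=2)
    return [(len(tr), len(va), len(te)) for tr, va, te in cv.split(X, y, groups=groups)]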
def sharp_ratio(data, base_ratio=0.0):
num = len(data)
t_return = (data.shift(-1) - data) / data
std = t_return.std()
sharpratio = (t_return.mean() - base_ratio) * (np.sqrt(num)) / std
return sharpratio
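# Quick illustrative check of sharp_ratio (added for clarity, not part of the original
# script). Assumption: `data` is a pandas Series of prices; note the function scales by
# sqrt(len(data)) rather than by a fixed annualization factor such as 252.
def _example_sharp_ratio():
    idx = pd.date_range("2020-01-01", periods=252, freq="D")
    prices = pd.Series(np.cumprod(1 + np.random.normal(0.0005, 0.01, 252)), index=idx)
    return sharp_ratio(prices)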
class Pre_data(object):
def __init__(self):
self.funcmap = {
"种子": self.set_all_seeds,
"填充": self.pipe_pad,
# each handler receives (dataframe, parameter list)
"取列": self.split_columns,
"取行": self.split_rows,
}
def set_all_seeds(self, dataobj, seed):
np.random.seed(seed)
random.seed(seed)
# tf.random.set_seed(seed)
return dataobj
def pipe_pad(self, dataobj, paras={}):
if paras["值"] is None:
if paras["方式"] == "向前":
# back-fill: propagate the next valid value upward
dataobj.fillna(method='bfill', inplace=True)
elif paras["方式"] == "向后":
# forward-fill: propagate the previous valid value downward
dataobj.fillna(method='ffill', inplace=True)
else:
raise Exception("paras error {}".format(paras))
else:
dataobj.fillna(value=paras["值"], inplace=True)
return dataobj
def split_columns(self, dataobj, paras):
return dataobj[paras]
def split_rows(self, dataobj, paras):
if isinstance(paras[0], str):
outdata = dataobj.loc[paras[0]:]
elif isinstance(paras[0], int):
outdata = dataobj.iloc[paras[0]:]
else:
raise Exception("type error {}".format(paras))
if isinstance(paras[1], str):
outdata = outdata.loc[:paras[1]]
elif isinstance(paras[1], int):
outdata = outdata.iloc[:paras[1]]
else:
raise Exception("type error {}".format(paras))
return outdata
def __call__(self, infiles, commands):
outdata = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
# apply the commands sequentially
for command in commands:
tkey = list(command.keys())[0]
pdobj = self.funcmap[tkey](pdobj, command[tkey])
outdata.append(pdobj)
return outdata
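# Illustrative call sketch (added for clarity, not part of the original script).
# The CSV path and the "close" column are made-up placeholders; commands are applied
# in order and keyed by the labels registered in funcmap ("种子" = seed, "填充" = fill,
# "取列" = select columns, "取行" = slice rows).
def _example_pre_data():
    pre = Pre_data()
    commands = [
        {"种子": 42},
        {"填充": {"值": None, "方式": "向后"}},
        {"取列": ["close"]},
        {"取行": [0, 1000]},
    ]
    return pre(["data/example_prices.csv"], commands)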
class Train_split(object):
def __init__(self):
self.funcmap = {
# takes a list of split points (labels, positions, or fractions) and a dataframe
"拆分": self.split_train_test,
}
def split_train_test(self, dataobj, paras):
outlist = []
if isinstance(paras[0], str):
outlist.append(dataobj.loc[:paras[0]])
if len(paras) > 1:
outlist.append(dataobj.loc[paras[0]:paras[1]])
outlist.append(dataobj.loc[paras[1]:])
else:
outlist.append(dataobj.loc[paras[0]:])
elif isinstance(paras[0], int):
outlist.append(dataobj.iloc[:paras[0]])
if len(paras) > 1:
outlist.append(dataobj.iloc[paras[0]:paras[1]])
outlist.append(dataobj.iloc[paras[1]:])
else:
outlist.append(dataobj.iloc[paras[0]:])
elif isinstance(paras[0], float):
tsplit = len(dataobj)
tsplit1 = int(tsplit * paras[0])
outlist.append(dataobj.iloc[:tsplit1])
if len(paras) > 1:
tsplit2 = int(tsplit * sum(paras))
outlist.append(dataobj.iloc[tsplit1:tsplit2])
outlist.append(dataobj.iloc[tsplit2:])
else:
outlist.append(dataobj.iloc[tsplit1:])
else:
raise Exception("type error {}".format(paras))
return outlist
def __call__(self, infiles, commands):
outdata = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
# apply the commands sequentially
for command in commands:
tkey = list(command.keys())[0]
pdobj = self.funcmap[tkey](pdobj, command[tkey])
outdata.append(pdobj)
return outdata
class SequenceChara(object):
def __init__(self):
self.funcmap = {
"均值n": self.mean_n,
"标准差n": self.std_n,
"涨幅比n": self.ratio_n,
"回撤n": self.draw_n,
"最涨n": self.maxrise_n,
"夏普n": self.sharp_n,
"label_最大n": self.l_max_n,
"label_最小n": self.l_min_n,
"label_回撤n": self.l_draw_n,
"label_最涨n": self.l_maxrise_n,
}
def mean_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).mean()
return outdata
def std_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).std()
return outdata
def ratio_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).apply(lambda x: x[-1] / x[0])
return outdata
def draw_n(self, dataobj, n):
pricepd = dataobj.iloc[:, 0]
maxfallret = pd.Series(index=pricepd.index)
for i in range(0, len(dataobj) - n):
tmpsec = pricepd[i + 1:i + n + 1]
tmpmax = pricepd[i]
tmpmin = pricepd[i]
tmpdrawdown = [1.0]
for t in range(0, n):
if tmpsec[t] > tmpmax:
tmpmax = tmpsec[t]
tmpdrawdown.append(tmpdrawdown[-1])
elif tmpsec[t] <= tmpmin:
tmpmin = tmpsec[t]
tmpdrawdown.append(tmpmin / tmpmax)
else:
pass
maxfallret[i] = min(tmpdrawdown)
return maxfallret
def maxrise_n(self, dataobj, n):
pricepd = dataobj.iloc[:, 0]
maxraiseret = pd.Series(index=pricepd.index)
for i in range(0, len(dataobj) - n):
tmpsec = pricepd[i + 1:i + n + 1]
tmpmax = pricepd[i]
tmpmin = pricepd[i]
tmpdrawup = [1.0]
for t in range(0, n):
if tmpsec[t] > tmpmax:
tmpmax = tmpsec[t]
tmpdrawup.append(tmpmax / tmpmin)
elif tmpsec[t] <= tmpmin:
tmpmin = tmpsec[t]
tmpdrawup.append(tmpdrawup[-1])
else:
pass
maxraiseret[i] = max(tmpdrawup)
return maxraiseret
def sharp_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).apply(sharp_ratio)
return outdata
def l_max_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).max()
outdata = outdata.shift(-n)
return outdata
def l_min_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).min()
outdata = outdata.shift(-n)
return outdata
def l_draw_n(self, dataobj, n):
outdata = self.draw_n(dataobj, n)
outdata = outdata.shift(-n)
return outdata
def l_maxrise_n(self, dataobj, n):
outdata = self.maxrise_n(dataobj, n)
outdata = outdata.shift(-n)
return outdata
def __call__(self, infiles, commands):
outdata = []
colhead = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
delhead = pdobj.columns[0]
colhead.append(delhead)
# apply each command independently to the same input
toutd = []
for command in commands:
tkey = list(command.keys())[0]
outobj = self.funcmap[tkey](pdobj, command[tkey])
toutd.append(outobj)
outdata.append(toutd)
return outdata, colhead
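# Illustrative call sketch (added for clarity, not part of the original script).
# Computes rolling features from the first price column of each CSV; the file path
# and window lengths are arbitrary placeholders.
def _example_sequence_chara():
    sc = SequenceChara()
    commands = [{"均值n": 20}, {"标准差n": 20}, {"label_最大n": 5}]
    features, col_heads = sc(["data/example_prices.csv"], commands)
    return features, col_heads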
class CharaExtract(object):
def __init__(self):
self.funcmap = {
"profit_avelog": self.profit_avelog,
"胜率": self.win_ratio,
"回撤": self.draw_n,
"最涨": self.rise_n,
"夏普": self.sharp_n,
}
def profit_avelog(self, dataobj):
return np.log(dataobj.iloc[-1, 0] / dataobj.iloc[0, 0]) / len(dataobj)
def win_ratio(self, dataobj):
pricepd = dataobj.diff()
pricepd = np.array(pricepd.iloc[:, 0])
posinum = len(pricepd[pricepd > 0])
allnum = len(pricepd[~np.isnan(pricepd)])
return float(posinum) / allnum
def draw_n(self, dataobj):
pricepd = dataobj.iloc[:, 0]
n = len(dataobj)
tmpsec = pricepd[0:n]
tmpmax = pricepd[0]
tmpmin = pricepd[0]
tmpdrawdown = [1.0]
for i in range(1, n):
if tmpsec[i] > tmpmax:
tmpmax = tmpsec[i]
tmpdrawdown.append(tmpdrawdown[-1])
elif tmpsec[i] <= tmpmin:
tmpmin = tmpsec[i]
tmpdrawdown.append(tmpmin / tmpmax)
else:
pass
return min(tmpdrawdown)
def rise_n(self, dataobj):
pricepd = dataobj.iloc[:, 0]
n = len(dataobj)
tmpsec = pricepd[0:n]
tmpmax = pricepd[0]
tmpmin = pricepd[0]
tmpdrawup = [1.0]
for i in range(1, n):
if tmpsec[i] > tmpmax:
tmpmax = tmpsec[i]
tmpdrawup.append(tmpmax / tmpmin)
elif tmpsec[i] <= tmpmin:
tmpmin = tmpsec[i]
tmpdrawup.append(tmpdrawup[-1])
else:
pass
return max(tmpdrawup)
def sharp_n(self, dataobj):
tsr = sharp_ratio(dataobj)
return tsr[0]
def __call__(self, infiles, commands):
outdatas = [{"filename": [], i1: []} for i1 in commands]
for i1, command in enumerate(commands):
# each statistic is computed independently over all files
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
pdobj = pdobj[[pdobj.columns[0]]]
outobj = self.funcmap[command](pdobj)
ttinfile = os.path.split(infile)[1]
outdatas[i1]["filename"].append(ttinfile)
outdatas[i1][command].append(outobj)
outdatapds = []
for i1 in outdatas:
tpd = pd.DataFrame(i1)
tpd.set_index("filename", inplace=True)
outdatapds.append(tpd)
return outdatapds
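# Illustrative call sketch (added for clarity, not part of the original script).
# Each requested statistic is computed over the whole first column of every CSV and
# returned as its own DataFrame indexed by file name; the path is a placeholder.
def _example_chara_extract():
    ce = CharaExtract()
    return ce(["data/example_prices.csv"], ["profit_avelog", "胜率", "回撤"])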
class DataMerge(object):
def __init__(self):
pass
def __call__(self, oriinfiles, projectpath):
# 1. Only prefix/suffix wildcard merging is supported; strip the leading/trailing *
pdobjlist, matchstrlist = regex2pairs(oriinfiles, projectpath)
outfilelist = [i1[0] + "_".join(["origin" if i2 == "" else i2 for i2 in i1[1]]) + i1[2] for i1 in matchstrlist]
outpdobjlist = [pd.concat(i1, axis=1) for i1 in pdobjlist]
return outpdobjlist, outfilelist
class DataCopy(object):
def __init__(self):
pass
def __call__(self, oriinfiles, prefix, projectpath):
infiles = [glob.glob(os.path.join(projectpath, i2)) for i2 in oriinfiles]
infiles = set(itertools.chain(*infiles))  # flatten and deduplicate
for infile in infiles:
(filepath, ofile) = os.path.split(infile)
shutil.copy(infile, os.path.join(filepath, prefix + ofile))
return None
class DataCalc(object):
def __init__(self):
self.funcmap = {
"+": self.add,
"-": self.mins,
"*": self.multi,
"/": self.divide,
"**": self.ppower,
}
self.symbolmap = {
"+": "加",
"-": "减",
"*": "乘",
"/": "除",
"**": "幂",
}
def add(self, dataobj, commandstr, float_f=None, float_b=None):
if float_b is None and float_f is None:
outdata = dataobj[0].iloc[:, 0] + dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([dataobj[0].columns[0], commandstr, dataobj[1].columns[0]])},
inplace=True)
elif float_b is not None:
outdata = dataobj[0].iloc[:, 0] + float_b
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([dataobj[0].columns[0], commandstr, str(float_b)])}, inplace=True)
elif float_f is not None:
outdata = float_f + dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([str(float_f), commandstr, dataobj[0].columns[0]])}, inplace=True)
else:
raise Exception("No such situation.float_f float_b both not None")
return outdata
def mins(self, dataobj, commandstr, float_f=None, float_b=None):
colstrs = [dataobj[0].columns[0], dataobj[1].columns[0]]
if float_b is None and float_f is None:
outdata = dataobj[0].iloc[:, 0] - dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([colstrs[0], commandstr, colstrs[1]])}, inplace=True)
elif float_b is not None:
outdata = dataobj[0].iloc[:, 0] - float_b
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([dataobj[0].columns[0], commandstr, str(float_b)])}, inplace=True)
elif float_f is not None:
outdata = float_f - dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([str(float_f), commandstr, dataobj[0].columns[0]])}, inplace=True)
else:
raise Exception("No such situation.float_f float_b both not None")
return outdata
def multi(self, dataobj, commandstr, float_f=None, float_b=None):
if float_b is None and float_f is None:
outdata = dataobj[0].iloc[:, 0] * dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([dataobj[0].columns[0], commandstr, dataobj[1].columns[0]])},
inplace=True)
elif float_b is not None:
outdata = dataobj[0].iloc[:, 0] * float_b
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([dataobj[0].columns[0], commandstr, str(float_b)])}, inplace=True)
elif float_f is not None:
outdata = float_f * dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([str(float_f), commandstr, dataobj[0].columns[0]])}, inplace=True)
else:
raise Exception("No such situation.float_f float_b both not None")
return outdata
def divide(self, dataobj, commandstr, float_f=None, float_b=None):
if float_b is None and float_f is None:
outdata = dataobj[0].iloc[:, 0] / dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([dataobj[0].columns[0], commandstr, dataobj[1].columns[0]])},
inplace=True)
elif float_b is not None:
outdata = dataobj[0].iloc[:, 0] / float_b
outdata = pd.DataFrame(outdata)
import numpy as np
import pandas as pd
import scripts.data as d
def compute_similarity_matrix(df, method="cosine"):
similarity_matrix = np.zeros([df.shape[1], df.shape[1]])
for i, offer1 in enumerate(df.columns):
for j, offer2 in enumerate(df.columns):
mask = df[offer1].notna() & df[offer2].notna()
if method == "cosine":
numerator = sum(df.loc[mask, offer1] * df.loc[mask, offer2])
denominator = np.sqrt(sum(df.loc[mask, offer1] ** 2)) * np.sqrt(
sum(df.loc[mask, offer2] ** 2)
)
similarity_matrix[i, j] = (
numerator / denominator if denominator != 0 else np.nan
)
similarity_matrix_df = pd.DataFrame(similarity_matrix, columns=df.columns)
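# Illustrative usage sketch (not part of the original source). It assumes the
# full function goes on to return `similarity_matrix_df`; the helper below is
# defined but never called here.
def _similarity_demo():
    ratings = pd.DataFrame({
        "offer_a": [1.0, np.nan, 3.0, 4.0],
        "offer_b": [2.0, 2.0, np.nan, 4.0],
        "offer_c": [1.0, 0.0, 1.0, np.nan],
    })
    # Pairwise cosine similarity computed only over rows rated by both offers.
    return compute_similarity_matrix(ratings, method="cosine")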
# Copyright (C) 2020 <NAME>, <NAME>
# Code -- Study 1 -- What Personal Information Can a Consumer Facial Image Reveal?
# https://github.com/computationalmarketing/facialanalysis/
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mtick
import matplotlib.image as mpimg
from matplotlib import gridspec
from matplotlib import rcParams
rcParams.update({'font.size': 12})
rcParams['font.family'] = 'serif'
rcParams['font.sans-serif'] = ['Times']
import seaborn as sns
from textwrap import wrap
import torchvision.models as models
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import os
from os import walk
from tqdm import tqdm
from sklearn.utils import class_weight
from sklearn import metrics, svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD, PCA
from sklearn.model_selection import KFold, GroupKFold, ShuffleSplit, GroupShuffleSplit
from sklearn.metrics import confusion_matrix
import scipy.stats
from scipy.special import softmax
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import dendrogram, linkage
# ATTENTION: we disable notifications when AUC cannot be computed
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
import json
import numpy as np
from torchvision import transforms
from torch.utils.data.dataset import Dataset
from PIL import Image
import pandas as pd
import pickle
'''
q_to_name_dict maps each question code in the survey results file to a short variable label
'''
q_to_name_dict = {#'Q11':'gender', #'Q12':'age', 'Q13':'race', 'Q14':'school', # these variables expanded below
'Q15':'marital_status',
#'Q16':'employment',
'Q17':'social_class', #'Q18':'religion', # NO VARIANCE, SO EXCLUDED 'Q19':'US_born',
'Q21':'body_fitness', #'Q22':'household_income', 'Q23':'zip_code',
'Q24':'orientation',
#'Q25':'political_party',
'Q26':'global_warming', 'Q27':'recycling', 'Q28':'religious',
'Q29':'offensive_ads_banned', 'Q30':'offensive_ads_brand',#'Q31':'facebook_evil',
'Q32':'NRA_support',
'Q34':'bin_family_career', 'Q35':'bin_friendship_laws', 'Q36':'bin_freedom_truth',
'Q37':'bin_pleasure_duty', 'Q38':'bin_wealth_fame', 'Q39':'bin_politeness_honesty',
'Q40':'bin_beautiful_smart', 'Q41':'bin_belonging_independence',
'Q42_1': 'lfstl_set_routine',
'Q42_4': 'lfstl_try_new_things',
'Q42_5': 'lfstl_highly_social_many_friends',
'Q42_6': 'lfstl_buy_new_before_others',
'Q42_7': 'lfstl_outgoing_soc_confident',
'Q42_8': 'lfstl_compulsive_purchases',
'Q42_10': 'lfstl_political_protest_participation',
'Q42_11': 'lfstl_donate_to_beggar',
'Q42_12': 'lfstl_like_hunting',
'Q42_13': 'lfstl_like_fishing',
'Q42_14': 'lfstl_like_hiking',
'Q42_15': 'lfstl_like_out_of_doors',
'Q42_16': 'lfstl_cabin_by_quiet_lake_spend_summer',
'Q42_17': 'lfstl_good_fixing_mechanical_things',
'Q42_18': 'lfstl_repair_my_own_car',
'Q42_19': 'lfstl_like_war_stories',
'Q42_20': 'lfstl_do_better_than_avg_fist_fight',
'Q42_21': 'lfstl_would_want_to_be_prof_football_player',
'Q42_22': 'lfstl_would_like_to_be_policeman',
'Q42_23': 'lfstl_too_much_violence_on_tv',
'Q42_24': 'lfstl_should_be_gun_in_every_home',
'Q42_25': 'lfstl_like_danger',
'Q42_26': 'lfstl_would_like_my_own_airplane',
'Q42_27': 'lfstl_like_to_play_poker',
'Q42_28': 'lfstl_smoke_too_much',
'Q42_29': 'lfstl_love_to_eat',
'Q42_30': 'lfstl_spend_money_on_myself_that_shuld_spend_on_family',
'Q42_31': 'lfstl_if_given_chance_men_would_cheat_on_spouses',
'Q42_33': 'lfstl_satisfied_with_life',
'Q42_34': 'lfstl_like_to_be_in_charge',
'Q42_35': 'lfstl_enjoy_shopping',
'Q42_36': 'lfstl_plan_spending_carefully',
'Q42_37': 'lfstl_obey_rules',
'Q43_1': 'lfstl_satisfied_with_weight',
'Q43_4': 'lfstl_regular_exercise_routine',
'Q43_5': 'lfstl_grew_up_eating_healthy_foods',
'Q43_7': 'lfstl_hard_to_be_disciplined_about_what_i_eat',
'Q43_9': 'lfstl_dont_have_to_worry_how_i_eat',
'Q43_11': 'lfstl_never_think_healthy_unhealthy_food',
'Q43_13': 'lfstl_stick_to_healthy_diet_for_family',
'Q43_14': 'lfstl_choose_snack_foods_that_give_vitamins_minerals',
'Q44_1': 'lfstl_often_prepare_sauces_dips_from_scratch',
'Q44_5': 'lfstl_dont_have_much_interest_cooking',
'Q44_6': 'lfstl_seek_out_healthy_foods',
'Q44_8': 'lfstl_read_ingreadients_list_on_the_label',
'Q44_9': 'lfstl_looking_for_new_products_when_at_grocery_store',
'Q44_11': 'lfstl_lower_priced_products_same_as_higher_priced',
'Q44_13': 'lfstl_look_for_authentic_ingredients_flavors',
'Q44_14': 'lfstl_like_ethnic_foods',
'Q44_15': 'lfstl_daring_adventurous_trying_new_foods',
'Q45_42': 'brkfst_none',
'Q45_43': 'brkfst_bar',
'Q45_44': 'brkfst_fruit',
'Q45_45': 'brkfst_nuts',
'Q45_46': 'brkfst_regular_yogurt',
'Q45_47': 'brkfst_greek_yogurt',
'Q45_48': 'brkfst_muffin_croissant',
'Q45_49': 'brkfst_cold_cereal',
'Q45_50': 'brkfst_hot_cereal_oatmeal',
'Q45_51': 'brkfst_frozen_waffle',
'Q45_52': 'brkfst_cheese_cottage_cheese',
'Q45_53': 'brkfst_sandwhich',
'Q45_54': 'brkfst_salad',
'Q45_55': 'brkfst_eggs',
'Q45_56': 'brkfst_meat',
'Q45_57': 'brkfst_chicken',
'Q45_58': 'brkfst_fish',
'Q45_59': 'brkfst_potatoes',
'Q45_60': 'brkfst_vegetables',
'Q45_61': 'brkfst_soup',
'Q45_62': 'brkfst_pasta',
'Q45_63': 'brkfst_hummus',
'Q45_64': 'brkfst_bread_toast',
'Q45_65': 'brkfst_bagel_roll',
'Q45_66': 'brkfst_chocolate_candy',
'Q45_67': 'brkfst_cake_cookies',
'Q45_68': 'brkfst_chips',
'Q45_69': 'brkfst_crackers',
'Q45_70': 'brkfst_pretzels',
'Q45_71': 'brkfst_smoothie',
'Q45_72': 'brkfst_pastry_buns_fruit_pies',
'Q45_73': 'brkfst_brownies_snack_cakes',
'Q45_74': 'brkfst_popcorn',
'Q45_75': 'brkfst_ice_cream_sorbet',
'Q45_76': 'brkfst_pudding_gelatin',
'Q45_77': 'brkfst_refrig_dip_salsa_guacamole_dairy',
'Q46_1': 'rsn_brkfst_gives_energy',
'Q46_4': 'rsn_brkfst_tide_over_next_meal',
'Q46_5': 'rsn_brkfst_great_taste',
'Q46_6': 'rsn_brkfst_satisfies_craving',
'Q46_7': 'rsn_brkfst_comforting_soothing',
'Q46_8': 'rsn_brkfst_healthy_good_guilt_free',
'Q46_9': 'rsn_brkfst_take_care_of_hunger_filling',
'Q46_10': 'rsn_brkfst_not_too_filling',
'Q46_11': 'rsn_brkfst_fits_with_who_i_am',
'Q46_12': 'rsn_brkfst_helps_relax_reduce_stress',
'Q46_13': 'rsn_brkfst_helps_control_weight',
'Q46_14': 'rsn_brkfst_helps_maintain_mental_focus',
'Q46_15': 'rsn_brkfst_keeps_from_overeating_next_meal',
'Q46_16': 'rsn_brkfst_great_texture',
'Q46_17': 'rsn_brkfst_sweet_taste',
'Q46_18': 'rsn_brkfst_tangy_savory_taste',
'Q46_19': 'rsn_brkfst_chunky_multidim_texture',
'Q46_20': 'rsn_brkfst_smooth_creamy_texture',
'Q46_21': 'rsn_brkfst_gives_protein',
'Q46_22': 'rsn_brkfst_keeps_me_going',
'Q46_23': 'rsn_brkfst_good_food_to_eat_with_others',
'Q46_24': 'rsn_brkfst_keeps_me_on_track',
'Q46_25': 'rsn_brkfst_like_ingredients',
'Q46_26': 'rsn_brkfst_refreshing_taste',
'Q47':'pay_organic', 'Q48':'alcohol', 'Q49':'credit_score',
'Q50_1':'em_happiness', 'Q50_2':'em_stress', 'Q50_3':'em_loneliness',
'Q50_4':'em_jealousy', 'Q50_5':'em_fear', 'Q50_6':'em_hopefulness',
'Q50_7':'em_regret', 'Q50_8':'em_optimism', 'Q50_9':'em_contentness',
'Q50_10':'em_gratitude', 'Q50_11':'em_guilt', 'Q50_12':'em_anger',
'Q50_13':'em_joy', 'Q50_14':'em_contempt', 'Q50_15':'em_disgust',
'Q50_16':'em_sadness', 'Q50_17':'em_surprise', 'Q50_18':'em_vulnerability',
'Q50_19':'em_curiosity', 'Q50_20':'em_warmth',
'Q51':'entertain_freq', 'Q52_1':'post_lik_pos', 'Q52_2':'post_lik_neg',
'Q53':'movie_activ_rec', 'Q54':'rec_lik_ask', 'Q55':'rec_lik_follow',
'Q56_1': 'bp_is_talkative',
'Q56_4': 'bp_tends_to_find_faults_with_others',
'Q56_5': 'bp_does_thorough_job',
'Q56_6': 'bp_is_depressed_blue',
'Q56_7': 'bp_is_original_comes_up_new_ideas',
'Q56_8': 'bp_is_helpful_unselfish',
'Q56_9': 'bp_is_relaxed_handles_stress_well',
'Q56_10': 'bp_is_curious_many_different_things',
'Q56_11': 'bp_is_full_of_energy',
'Q56_12': 'bp_starts_quarrels_with_others',
'Q56_13': 'bp_can_be_tense',
'Q56_14': 'bp_is_ingenious_deep_thinker',
'Q56_15': 'bp_has_forgiving_nature',
'Q56_16': 'bp_tends_to_be_lazy',
'Q56_17': 'bp_is_emotionally_stable_not_easily_upset',
'Q56_18': 'bp_is_inventive',
'Q56_19': 'bp_has_assertive_personality',
'Q56_20': 'bp_can_be_cold_aloof',
'Q56_21': 'bp_perserveres_until_task_finished',
'Q56_22': 'bp_can_be_moody',
'Q56_23': 'bp_values_artistic_aesthetic_experience',
'Q56_24': 'bp_is_sometimes_shy_inhibited',
'Q56_25': 'bp_is_considerate_kind_almost_everything',
'Q56_26': 'bp_does_things_efficiently',
'Q56_27': 'bp_remains_calm_in_tense_situations',
'Q56_28': 'bp_prefers_routine_work',
'Q56_29': 'bp_is_outgoing_sociable',
'Q56_30': 'bp_is_sometimes_rude_to_others',
'Q56_31': 'bp_makes_plans_follows_through',
'Q56_32': 'bp_gets_nervous_easily',
'Q56_33': 'bp_likes_to_reflect_play_with_ideas',
'Q56_39': 'bp_likes_to_cooperate_with_others',
'Q56_40': 'bp_is_easily_distracted',
'Q56_41': 'bp_is_sophisticated_arts_music_literature',
'Q56_42': 'bp_generates_enthusiasm',
'Q56_43': 'bp_is_reliable_worker',
'Q56_44': 'bp_is_reserved',
'Q56_45': 'bp_can_be_somewhat_careless',
'Q56_46': 'bp_tends_to_be_disorganized',
'Q56_47': 'bp_worries_a_lot',
'Q56_48': 'bp_has_active_imagination',
'Q56_49': 'bp_tends_to_be_quiet',
'Q56_50': 'bp_is_generally_trusting',
'Q56_52': 'bp_has_few_artistic_interests',
'Q57_1':'use_facebook', 'Q57_2':'use_twitter', 'Q57_3':'use_netflix',
'Q57_4':'use_spotify', 'Q57_5':'use_apple_music', 'Q57_6':'use_tinder',
'Q57_7':'use_pandora', 'Q57_9':'use_amazon',
'Q57_11':'use_saks', 'Q57_13':'use_dropbox',
'Q57_14':'use_gmail', 'Q57_15':'use_hotmail',
'Q57_16':'use_yahoo', 'Q57_18':'use_github',
'Q57_20':'use_shazam', 'Q57_21':'use_snapchat',
'Q57_22':'use_whatsapp', 'Q57_23':'use_instagram',
'Q57_24':'use_telegram', 'Q57_27':'use_hulu',
'Q57_30':'use_bloomingdales', 'Q57_31':'use_NYT',
'Q57_32':'use_WSJ',
'Q59' : 'netflix_frequent_viewer',
'Q60' : 'netflix_binger',
'Q61' : 'netflix_active_recommender',
'Q62' : 'netflix_intend_to_get',
'Q63':'superbowl', 'Q64_1':'TV_news_trust', 'Q64_2':'Internet_news_trust',
'Q65':'track_news_daily', 'Q66':'read_reviews', #'Q67':'sports_programming',
'Q68':'social_media_time', 'Q69':'social_media_posting', #'Q70':'video_watching',
'Q73':'bin_iphone_galaxy', 'Q74':'bin_clothing_tech', 'Q75':'bin_brand_recogn_not',
'Q76':'bin_chocolate_strawberry', 'Q77':'bin_coke_original_diet',
'Q78':'bin_coke_pepsi', 'Q79':'bin_club_book', 'Q80':'bin_beach_mountain',
'Q81':'bin_story_tell_listen', 'Q82':'bin_capitalism_socialism',
'Q83':'bin_children_not', 'Q84':'bin_thinking_acting', 'Q85':'bin_planning_spontaneity',
'Q86':'bin_trump_hillary', 'Q87':'bin_madonna_lady_gaga', 'Q88':'bin_beatles_michael_jackson',
'Q89':'ec_past_fin_better', 'Q90':'ec_fut_fin_better', 'Q91':'ec_good_times',
'Q92':'ec_depression', 'Q93':'ec_buy',
'Q94_1' : 'price_bicycle',
'Q94_4' : 'price_smartphone',
'Q94_5' : 'price_laptop',
'Q94_6' : 'price_jeans',
'Q94_7' : 'price_sneakers',
'Q94_8' : 'price_microwave',
'Q94_9' : 'price_washing_machine',
'Q94_10' : 'price_office_chair',
'Q95_1' : 'spend_savings_emergencies',
'Q95_3' : 'spend_necessities_bills',
'Q95_4' : 'spend_entertainment_gift_loved_one',
'Q97':'restaurant_ethics', 'Q99':'criminal_ethics', 'source':'data_source',
'Q11_0':'gender_0', 'Q11_1':'gender_1', 'Q11_2':'gender_2',
'Q12_0': 'age_0', 'Q12_1': 'age_1', 'Q12_2': 'age_2',
'Q13_0': 'race_0','Q13_1': 'race_1','Q13_2': 'race_2','Q13_3': 'race_3','Q13_4': 'race_4',
'Q14_0': 'school_0','Q14_1': 'school_1','Q14_2': 'school_2',
'Q16_0': 'employment_0','Q16_1': 'employment_1','Q16_2': 'employment_2',
'Q18_0': 'religion_0','Q18_1': 'religion_1','Q18_2': 'religion_2','Q18_3': 'religion_3',
'Q22_0': 'household_income_0','Q22_1': 'household_income_1', 'Q22_2': 'household_income_2',
'Q23_0': 'zip_code_0','Q23_1': 'zip_code_1', 'Q23_2':'zip_code_2','Q23_3': 'zip_code_3','Q23_4': 'zip_code_4',
'Q25_0': 'political_party_0','Q25_1': 'political_party_1','Q25_2': 'political_party_2',
'Q31_0': 'facebook_evil_0','Q31_1': 'facebook_evil_1', 'Q31_2': 'facebook_evil_2',
'Q67_0': 'sports_programming_0','Q67_1': 'sports_programming_1', 'Q67_2': 'sports_programming_2',
'Q70_0': 'video_watching_0', 'Q70_1': 'video_watching_1', 'Q70_2': 'video_watching_2',
'personality_extraversion':'personality_extraversion',
'personality_agreeableness':'personality_agreeableness',
'personality_conscientiousness':'personality_conscientiousness',
'personality_neuroticism':'personality_neuroticism',
'personality_openness':'personality_openness',
'Q71#1_1' : 'active_consumer_google_news',
'Q71#1_2' : 'active_consumer_yahoo_news',
'Q71#1_3' : 'active_consumer_new_york_times',
'Q71#1_4' : 'active_consumer_wsj',
'Q71#1_5' : 'active_consumer_boston_globe',
'Q71#1_6' : 'active_consumer_cnn',
'Q71#1_7' : 'active_consumer_huffpost',
'Q71#1_8' : 'active_consumer_foxnews',
'Q71#1_10' : 'active_consumer_vice',
'Q71#1_11' : 'active_consumer_chicago_tribune',
'Q71#1_12' : 'active_consumer_breitbart',
'Q71#1_14' : 'active_consumer_washington_post',
'Q71#1_16' : 'active_consumer_bbc_news',
'Q71#1_17' : 'active_consumer_facebook',
'Q71#1_19' : 'active_consumer_twitter',
'Q71#2_1' : 'bias_google_news',
'Q71#2_2' : 'bias_yahoo_news',
'Q71#2_3' : 'bias_new_york_times',
'Q71#2_4' : 'bias_wsj',
'Q71#2_5' : 'bias_boston_globe',
'Q71#2_6' : 'bias_cnn',
'Q71#2_7' : 'bias_huffpost',
'Q71#2_8' : 'bias_foxnews',
'Q71#2_10' : 'bias_vice',
'Q71#2_11' : 'bias_chicago_tribune',
'Q71#2_12' : 'bias_breitbart',
'Q71#2_14' : 'bias_washington_post',
'Q71#2_16' : 'bias_bbc_news',
'Q71#2_17' : 'bias_facebook',
'Q71#2_19' : 'bias_twitter',
'Q6_1_TEXT_0' : 'browser_safari_iphone',
'Q6_1_TEXT_1' : 'browser_chrome',
'Q6_1_TEXT_2' : 'browser_other',
}
image_metrics = {
'rc' : 'red_color',
'gc' : 'green_color',
'bc' : 'blue_color',
'fwhr' : 'face_with_2_height_ratio',
'fwidth' : 'face_width',
'fheight': 'face_height',
'sideeyeratio' : 'face_to_eye_left_right_ratio',
'noseheight' : 'nose_height',
'eyehdiff' : 'eye_height_difference',
'intereyedist': 'inter_eye_difference',
'lipwidth' : 'lip_width',
}
'''
q_to_full_name_dict is similar to q_to_name_dict: it maps each question code from the
survey results file to the variable's full name, which is used in plotting
'''
q_to_full_name_dict = {'Q15':'Marital status',
'Q17':'Social class',
'Q21':'Body fitness',
'Q24':'Sexual orientation',
'Q26':'Believes global warming is a threat',
'Q27':'Makes effort to recycle',
'Q28':'Considers himself religious',
'Q29':'Believes offensive ads should be banned',
'Q30':'Will stop buying a brand accused of offensive advertising',
'Q32':'Supports National Rifle Association (NRA)',
'Q34':'More important: Family vs. career',
'Q35':'More important: Friendship vs. laws',
'Q36':'More important: Freedom vs. truth',
'Q37':'More important: Pleasure vs. duty',
'Q38':'More important: Wealth vs. fame',
'Q39':'More important: Politeness vs. honesty',
'Q40':'More important: Being beautiful vs. being smart',
'Q41':'More important: Belonging vs. independence',
# Lifestyle
'Q42_1': 'Lifestyle: Prefers a set routine',
'Q42_4': 'Lifestyle: Likes to try new things',
'Q42_5': 'Lifestyle: Is highly social with many friends',
'Q42_6': 'Lifestyle: Buys new things before others',
'Q42_7': 'Lifestyle: Is outgoing and socially confident',
'Q42_8': 'Lifestyle: Tends to make compulsive purchases',
'Q42_10': 'Lifestyle: Is likely to participate in a political protest',
'Q42_11': 'Lifestyle: Is likely to donate to a beggar',
'Q42_12': 'Lifestyle: Likes hunting',
'Q42_13': 'Lifestyle: Likes fishing',
'Q42_14': 'Lifestyle: Likes hiking',
'Q42_15': 'Lifestyle: Likes out of doors',
'Q42_16': 'Lifestyle: Cabin by a quiet lake is a good way to spend summer',
'Q42_17': 'Lifestyle: Is good at fixing mechanical things',
'Q42_18': 'Lifestyle: Repairs his own car',
'Q42_19': 'Lifestyle: Likes war stories',
'Q42_20': 'Lifestyle: Would do better than average in a fist fight',
'Q42_21': 'Lifestyle: Would want to be a professional football player',
'Q42_22': 'Lifestyle: Would like to be policeman',
'Q42_23': 'Lifestyle: Thinks there is too much violence on TV',
'Q42_24': 'Lifestyle: Believes there should be a gun in every home',
'Q42_25': 'Lifestyle: Likes danger',
'Q42_26': 'Lifestyle: Would like his own airplane',
'Q42_27': 'Lifestyle: Likes to play poker',
'Q42_28': 'Lifestyle: Smokes too much',
'Q42_29': 'Lifestyle: Loves to eat',
'Q42_30': 'Lifestyle: Spends money on himself that should be spent on family',
'Q42_31': 'Lifestyle: Believes that if given a chance men would cheat on spouses',
'Q42_33': 'Lifestyle: Is satisfied with life',
'Q42_34': 'Lifestyle: Likes to be in charge',
'Q42_35': 'Lifestyle: Enjoys shopping',
'Q42_36': 'Lifestyle: Plans spending carefully',
'Q42_37': 'Lifestyle: Obeys rules',
'Q43_1': 'Food habits, attitudes: Is satisfied with his weight',
'Q43_4': 'Food habits, attitudes: Follows regular exercise routine',
'Q43_5': 'Food habits, attitudes: Grew up eating healthy foods',
'Q43_7': 'Food habits, attitudes: Finds it hard to be disciplined about what he eats',
'Q43_9': 'Food habits, attitudes: Does not have to worry about how he eats',
'Q43_11': 'Food habits, attitudes: Never thinks of healthy or unhealthy food',
'Q43_13': 'Food habits, attitudes: Sticks to healthy diet for his family',
'Q43_14': 'Food habits, attitudes: Chooses snack foods that give vitamins and minerals',
'Q44_1': 'Food habits, attitudes: Often prepares sauces, dips from scratch',
'Q44_5': 'Food habits, attitudes: Does not have much interest in cooking',
'Q44_6': 'Food habits, attitudes: Seeks out healthy foods',
'Q44_8': 'Food habits, attitudes: Reads ingredient list on the label',
'Q44_9': 'Food habits, attitudes: Looks for new products when at grocery store',
'Q44_11': 'Food habits, attitudes: Believes lower priced products are the same as higher priced ones',
'Q44_13': 'Food habits, attitudes: Look for authentic ingredients and flavors',
'Q44_14': 'Food habits, attitudes: Likes ethnic foods',
'Q44_15': 'Food habits, attitudes: Is daring, adventurous in trying new foods',
'Q45_42': 'Breakfast food choice: No breakfast',
'Q45_43': 'Breakfast food choice: Bar',
'Q45_44': 'Breakfast food choice: Fruit',
'Q45_45': 'Breakfast food choice: Nuts',
'Q45_46': 'Breakfast food choice: Regular yogurt',
'Q45_47': 'Breakfast food choice: Greek yogurt',
'Q45_48': 'Breakfast food choice: Muffin or croissant',
'Q45_49': 'Breakfast food choice: Cold cereal',
'Q45_50': 'Breakfast food choice: Hot cereal or oatmeal',
'Q45_51': 'Breakfast food choice: Frozen waffle',
'Q45_52': 'Breakfast food choice: Cheese, cottage cheese',
'Q45_53': 'Breakfast food choice: Sandwich',
'Q45_54': 'Breakfast food choice: Salad',
'Q45_55': 'Breakfast food choice: Eggs',
'Q45_56': 'Breakfast food choice: Meat',
'Q45_57': 'Breakfast food choice: Chicken',
'Q45_58': 'Breakfast food choice: Fish',
'Q45_59': 'Breakfast food choice: Potatoes',
'Q45_60': 'Breakfast food choice: Vegetables',
'Q45_61': 'Breakfast food choice: Soup',
'Q45_62': 'Breakfast food choice: Pasta',
'Q45_63': 'Breakfast food choice: Hummus',
'Q45_64': 'Breakfast food choice: Bread, toast',
'Q45_65': 'Breakfast food choice: Bagel, roll',
'Q45_66': 'Breakfast food choice: Chocolate candy',
'Q45_67': 'Breakfast food choice: Cake, cookies',
'Q45_68': 'Breakfast food choice: Chips',
'Q45_69': 'Breakfast food choice: Crackers',
'Q45_70': 'Breakfast food choice: Pretzels',
'Q45_71': 'Breakfast food choice: Smoothie',
'Q45_72': 'Breakfast food choice: Pastry, buns, fruit pies',
'Q45_73': 'Breakfast food choice: Brownies, snack cakes',
'Q45_74': 'Breakfast food choice: Popcorn',
'Q45_75': 'Breakfast food choice: Ice cream, sorbet',
'Q45_76': 'Breakfast food choice: Pudding, gelatin',
'Q45_77': 'Breakfast food choice: refrigerated dip (salsa, guacamole, dairy)',
'Q46_1': 'Breakfast food choice motivations: Gives energy',
'Q46_4': 'Breakfast food choice motivations: Tides him over until next meal',
'Q46_5': 'Breakfast food choice motivations: Tastes great',
'Q46_6': 'Breakfast food choice motivations: Satisfies a craving',
'Q46_7': 'Breakfast food choice motivations: Is comforting, soothing',
'Q46_8': 'Breakfast food choice motivations: Healthy, good, guilt free',
'Q46_9': 'Breakfast food choice motivations: Takes care of hunger, is filling',
'Q46_10': 'Breakfast food choice motivations: Is not too filling',
'Q46_11': 'Breakfast food choice motivations: Fits with who he is',
'Q46_12': 'Breakfast food choice motivations: Helps relax, reduce stress',
'Q46_13': 'Breakfast food choice motivations: Helps control weight',
'Q46_14': 'Breakfast food choice motivations: Helps maintain mental focus',
'Q46_15': 'Breakfast food choice motivations: Keeps from overeating during next meal',
'Q46_16': 'Breakfast food choice motivations: Has great texture',
'Q46_17': 'Breakfast food choice motivations: Tastes sweet',
'Q46_18': 'Breakfast food choice motivations: Tastes tangy, savory',
'Q46_19': 'Breakfast food choice motivations: Has chunky, multidimensional texture',
'Q46_20': 'Breakfast food choice motivations: Has smooth, creamy texture',
'Q46_21': 'Breakfast food choice motivations: Gives protein',
'Q46_22': 'Breakfast food choice motivations: Keeps him going',
'Q46_23': 'Breakfast food choice motivations: Is good food to eat with others',
'Q46_24': 'Breakfast food choice motivations: Keeps him on track',
'Q46_25': 'Breakfast food choice motivations: Likes ingredients',
'Q46_26': 'Breakfast food choice motivations: Has refreshing taste',
'Q47':'Is ready to pay more for organic food products',
'Q48':'Is a frequent alcohol consumer',
'Q49':'Missed a credit card payment within last year',
'Q50_1':'Regularly felt emotions: Happiness',
'Q50_2':'Regularly felt emotions: Stress',
'Q50_3':'Regularly felt emotions: Loneliness',
'Q50_4':'Regularly felt emotions: Jealousy',
'Q50_5':'Regularly felt emotions: Fear',
'Q50_6':'Regularly felt emotions: Hopefulness',
'Q50_7':'Regularly felt emotions: Regret',
'Q50_8':'Regularly felt emotions: Optimism',
'Q50_9':'Regularly felt emotions: Contentness',
'Q50_10':'Regularly felt emotions: Gratitude',
'Q50_11':'Regularly felt emotions: Guilt',
'Q50_12':'Regularly felt emotions: Anger',
'Q50_13':'Regularly felt emotions: Joy',
'Q50_14':'Regularly felt emotions: Contempt',
'Q50_15':'Regularly felt emotions: Disgust',
'Q50_16':'Regularly felt emotions: Sadness',
'Q50_17':'Regularly felt emotions: Surprise',
'Q50_18':'Regularly felt emotions: Vulnerability',
'Q50_19':'Regularly felt emotions: Curiosity',
'Q50_20':'Regularly felt emotions: Warmth',
'Q51':'Frequency of entertaining others at home',
'Q52_1':'Likelihood of social media post about positive shopping experience',
'Q52_2':'Likelihood of social media post about negative shopping experience',
'Q53':'Actively recommends movies to watch to friends',
'Q54':'Likelihood of asking a friend for a movie recommendation',
'Q55':'Likelihood of following a movie recommendation from a friend',
'Q56_1': 'Big 5 variable: Is talkative',
'Q56_4': 'Big 5 variable: Tends to find faults with others (reverse)',
'Q56_5': 'Big 5 variable: Does thorough job',
'Q56_6': 'Big 5 variable: Is depressed, blue',
'Q56_7': 'Big 5 variable: Is original, comes up new ideas',
'Q56_8': 'Big 5 variable: Is helpful, unselfish',
'Q56_9': 'Big 5 variable: Is relaxed, handles stress well (reverse)',
'Q56_10': 'Big 5 variable: Is curious about many different things',
'Q56_11': 'Big 5 variable: Is full of energy',
'Q56_12': 'Big 5 variable: Starts quarrels with others (reverse)',
'Q56_13': 'Big 5 variable: Can be tense',
'Q56_14': 'Big 5 variable: Is ingenious, deep thinker',
'Q56_15': 'Big 5 variable: Has forgiving nature',
'Q56_16': 'Big 5 variable: Tends to be lazy (reverse)',
'Q56_17': 'Big 5 variable: Is emotionally stable, not easily upset (reverse)',
'Q56_18': 'Big 5 variable: Is inventive',
'Q56_19': 'Big 5 variable: Has assertive personality',
'Q56_20': 'Big 5 variable: Can be cold, aloof (reverse)',
'Q56_21': 'Big 5 variable: Perseveres until task is finished',
'Q56_22': 'Big 5 variable: Can be moody',
'Q56_23': 'Big 5 variable: Values artistic, aesthetic experience',
'Q56_24': 'Big 5 variable: Is sometimes shy, inhibited (reverse)',
'Q56_25': 'Big 5 variable: Is considerate, kind to almost everyone',
'Q56_26': 'Big 5 variable: Does things efficiently',
'Q56_27': 'Big 5 variable: Remains calm in tense situations (reverse)',
'Q56_28': 'Big 5 variable: Prefers routine work (reverse)',
'Q56_29': 'Big 5 variable: Is outgoing, sociable',
'Q56_30': 'Big 5 variable: Is sometimes rude to others (reverse)',
'Q56_31': 'Big 5 variable: Makes plans and follows through',
'Q56_32': 'Big 5 variable: Gets nervous easily',
'Q56_33': 'Big 5 variable: Likes to reflect, play with ideas',
'Q56_39': 'Big 5 variable: Likes to cooperate with others',
'Q56_40': 'Big 5 variable: Is easily distracted (reverse)',
'Q56_41': 'Big 5 variable: Is sophisticated in arts, music, literature',
'Q56_42': 'Big 5 variable: Generates enthusiasm',
'Q56_43': 'Big 5 variable: Is reliable worker',
'Q56_44': 'Big 5 variable: Is reserved (reverse)',
'Q56_45': 'Big 5 variable: Can be somewhat careless (reverse)',
'Q56_46': 'Big 5 variable: Tends to be disorganized (reverse)',
'Q56_47': 'Big 5 variable: Worries a lot',
'Q56_48': 'Big 5 variable: Has active imagination',
'Q56_49': 'Big 5 variable: Tends to be quiet (reverse)',
'Q56_50': 'Big 5 variable: Is generally trusting',
'Q56_52': 'Big 5 variable: Has few artistic interests (reverse)',
'Q57_1':'Uses Facebook', 'Q57_2':'Uses Twitter', 'Q57_3':'Uses Netflix',
'Q57_4':'Uses Spotify', 'Q57_5':'Uses Apple music', 'Q57_6':'Uses Tinder',
'Q57_7':'Uses Pandora', 'Q57_9':'Uses Amazon',
'Q57_11':'Uses Saks', 'Q57_13':'Uses Dropbox',
'Q57_14':'Uses Gmail', 'Q57_15':'Uses Hotmail',
'Q57_16':'Uses Yahoo', 'Q57_18':'Uses Github',
'Q57_20':'Uses Shazam', 'Q57_21':'Uses Snapchat',
'Q57_22':'Uses Whatsapp', 'Q57_23':'Uses Instagram',
'Q57_24':'Uses Telegram', 'Q57_27':'Uses Hulu',
'Q57_30':'Uses Bloomingdales', 'Q57_31':'Uses NYT',
'Q57_32':'Uses WSJ',
'Q59' : 'Watches Netflix 4 or more days per week',
'Q60' : 'Tends to watch more than 3 hours of Netflix at a time',
'Q61' : 'Likelihood of recommending Netflix to a friend',
'Q62' : 'Intent to get Netflix subscription within 6 months',
'Q63':'Perceived effect of Superbowl ads on choices',
'Q64_1':'Trusts TV news',
'Q64_2':'Trusts Internet news',
'Q65':'Tracks news daily',
'Q66':'Reads product review in detail before purchase', #'Q67':'sports_programming',
'Q68':'Spends 4 hours or more a day on social media',
'Q69':'Frequency of posting on social media', #'Q70':'video_watching',
'Q73':'Prefers: iPhone vs. Galaxy', 'Q74':'Prefers: Clothing vs. tech', 'Q75':'Prefers: Recognizable brand vs. not well-known brand',
'Q76':'Prefers: Chocolate ice cream vs. strawberry ice cream', 'Q77':'Prefers: Original coke vs. diet',
'Q78':'Prefers: Coke vs. Pepsi', 'Q79':'Prefers: Night in club vs. night with a book', 'Q80':'Prefers: Beach vs. mountain',
'Q81':'Prefers: Telling a story vs. listening to a story', 'Q82':'Prefers: Capitalism vs. socialism',
'Q83':'Prefers: Children vs. no children', 'Q84':'Prefers: Thinking vs. acting', 'Q85':'Prefers: Planning vs. spontaneity',
'Q86':'Prefers: Trump vs. Hillary', 'Q87':'Prefers: Madonna vs. Lady Gaga', 'Q88':'Prefers: Beatles vs. Michael Jackson',
'Q89':'Is better/ worse financially than a year before',
'Q90':'Expects to be better/ worse financially in a year',
'Q91':'Expects good/ bad times financially in the US within a year',
'Q92':'Expects economic depression in the next five years',
'Q93':'Considers it to be a good time to buy a major household item',
'Q94_1' : 'Price sensitivity: Bicycle',
'Q94_4' : 'Price sensitivity: Smartphone',
'Q94_5' : 'Price sensitivity: Laptop',
'Q94_6' : 'Price sensitivity: Jeans',
'Q94_7' : 'Price sensitivity: Sneakers',
'Q94_8' : 'Price sensitivity: Microwave',
'Q94_9' : 'Price sensitivity: Washing machine',
'Q94_10' : 'Price sensitivity: Office chair',
'Q95_1' : 'Windfall income allocation: Savings, emergencies',
'Q95_3' : 'Windfall income allocation: Necessities, bills',
'Q95_4' : 'Windfall income allocation: Gift to a loved one',
'Q97':'Ethics: What right does your friend have to expect you to go easy on her restaurant in your review?',
'Q99':'Ethics: What right does your friend have to expect you to lie in court to protect him?',
'source':'Data source: Qualtrics panel vs. MTurk',
'Q11_0': 'Gender: Male', 'Q11_1':'Gender: Female', 'Q11_2':'Gender: Other',
'Q12_0': 'Age: <=30', 'Q12_1': 'Age: (30; 50] ', 'Q12_2': 'Age: > 50',
'Q13_0': 'Race: Caucasian/ White', 'Q13_1': 'Race: Asian','Q13_2': 'Race: Hispanic/ Latino','Q13_3': 'Race: African American/ Black','Q13_4': 'Race: Other',
'Q14_0': 'Education achieved: High school or less','Q14_1': 'Education achieved: Undergraduate degree','Q14_2': 'Education achieved: Graduate degree',
'Q16_0': 'Employment: Employed/ student','Q16_1': 'Employment: Unemployed, but looking','Q16_2': 'Employment: Unemployed and not looking',
'Q18_0': 'Religious background: Christianity','Q18_1': 'Religious background: Judaism, Islam','Q18_2': 'Religious background: Other (Hinduism, Buddhism, etc.)','Q18_3': 'Religious background: No particular religion',
'Q22_0': 'Household income: <$50K','Q22_1': 'Household income: [$50K,$100K)', 'Q22_2': 'Household income: >=$100K',
'Q23_0': 'ZIP code first digit: 0, 1','Q23_1': 'ZIP code first digit: 2, 3', 'Q23_2':'ZIP code first digit: 4, 5','Q23_3': 'ZIP code first digit: 6, 7','Q23_4': 'ZIP code first digit: 8, 9',
'Q25_0': 'Political party alignment: Republican','Q25_1': 'Political party alignment: Democrat','Q25_2': 'Political party alignment: Independent',
'Q31_0': 'Facebook is good for humanity: Yes','Q31_1': 'Facebook is good for humanity: No', 'Q31_2': 'Facebook is good for humanity: Unsure',
'Q67_0': 'Sports programming hours watched per week: 0','Q67_1': 'Sports programming hours watched per week: (0,8]', 'Q67_2': 'Sports programming hours watched per week: >8',
'Q70_0': 'Prefers to watch videos: Online', 'Q70_1': 'Prefers to watch videos: TV', 'Q70_2': 'Prefers to watch videos: Does not watch videos',
'personality_extraversion':'Big 5 personality: Extraversion',
'personality_agreeableness':'Big 5 personality: Agreeableness',
'personality_conscientiousness':'Big 5 personality: Conscientiousness',
'personality_neuroticism':'Big 5 personality: Neuroticism',
'personality_openness':'Big 5 personality: Openness',
'Q71#1_1' : 'Active consumer: Google news',
'Q71#1_2' : 'Active consumer: Yahoo news',
'Q71#1_3' : 'Active consumer: New York Times',
'Q71#1_4' : 'Active consumer: WSJ',
'Q71#1_5' : 'Active consumer: Boston Globe',
'Q71#1_6' : 'Active consumer: CNN',
'Q71#1_7' : 'Active consumer: Huffpost',
'Q71#1_8' : 'Active consumer: FoxNews',
'Q71#1_10' : 'Active consumer: Vice',
'Q71#1_11' : 'Active consumer: Chicago Tribune',
'Q71#1_12' : 'Active consumer: Breitbart',
'Q71#1_14' : 'Active consumer: Washington Post',
'Q71#1_16' : 'Active consumer: BBC News',
'Q71#1_17' : 'Active consumer: Facebook',
'Q71#1_19' : 'Active consumer: Twitter',
'Q71#2_1' : 'Perception of bias: Google News',
'Q71#2_2' : 'Perception of bias: Yahoo News',
'Q71#2_3' : 'Perception of bias: New York Times',
'Q71#2_4' : 'Perception of bias: WSJ',
'Q71#2_5' : 'Perception of bias: Boston Globe',
'Q71#2_6' : 'Perception of bias: CNN',
'Q71#2_7' : 'Perception of bias: Huffpost',
'Q71#2_8' : 'Perception of bias: FoxNews',
'Q71#2_10' : 'Perception of bias: Vice',
'Q71#2_11' : 'Perception of bias: Chicago Tribune',
'Q71#2_12' : 'Perception of bias: Breitbart',
'Q71#2_14' : 'Perception of bias: Washington Post',
'Q71#2_16' : 'Perception of bias: BBC News',
'Q71#2_17' : 'Perception of bias: Facebook',
'Q71#2_19' : 'Perception of bias: Twitter',
'Q6_1_TEXT_0' : 'Browser: Safari iPhone',
'Q6_1_TEXT_1' : 'Browser: Chrome',
'Q6_1_TEXT_2' : 'Browser: Other',
# 'rc' : 'Color channel: Red',
# 'gc' : 'Color channel: Green',
# 'bc' : 'Color channel: Blue',
# 'fwhr' : 'Face width-to-height ratio',
# 'fwidth' : 'Face width',
# 'fheight': 'Face height',
# 'sideeyeratio' : 'Face-edge to eye distance, left to right ratio',
# 'noseheight' : 'Nose height',
# 'eyehdiff' : 'Eye height difference',
# 'intereyedist': 'Inter-eye difference',
# 'lipwidth' : 'Lip width',
}
'''
var_groups groups the variables into the categories we identified;
some variables, such as data source (Qualtrics vs. MTurk), are not included in the grouping
'''
var_groups = {
'demographics_biological' : [
'Q11_1', # gender
'Q12_0', 'Q12_1', # age
'Q13_0','Q13_1', 'Q13_2','Q13_3', # race
'Q21', # body fitness
'Q24',# orientation
# 'rc', 'gc', 'bc',# avg. face color
# 'fwhr', 'fwidth', 'fheight',
# 'sideeyeratio', 'noseheight', 'eyehdiff', 'intereyedist', 'lipwidth'
],
'demographics_socio_economic' : [
'Q15', # :'marital_status'
'Q17', #:'social_class'
'Q14_0', 'Q14_1', # school level
'Q16_0', 'Q16_1', # employment status
'Q18_0','Q18_1','Q18_2', # religious
'Q22_0', 'Q22_1', # household income
'Q23_0','Q23_1', 'Q23_2','Q23_3', # zip code
'Q25_0', 'Q25_1'], # political party
'personality' : ['personality_extraversion',
'personality_agreeableness',
'personality_conscientiousness',
'personality_neuroticism',
'personality_openness'
],
'character_ethics' : [
'Q97', #'restaurant_ethics'
'Q99', #'criminal_ethics'
'Q49', #'credit_score',
'Q48', #'alcohol',
],
'lifestyle' : [
'Q42_1',#: 'lfstl_set_routine',
'Q42_4',#: 'lfstl_try_new_things',
'Q42_5',#: 'lfstl_highly_social_many_friends',
'Q42_6',#: 'lfstl_buy_new_before_others',
'Q42_7',#: 'lfstl_outgoing_soc_confident',
'Q42_8',#: 'lfstl_compulsive_purchases',
'Q42_10',#: 'lfstl_political_protest_participation',
'Q42_11',#: 'lfstl_donate_to_beggar',
'Q42_12',#: 'lfstl_like_hunting',
'Q42_13',#: 'lfstl_like_fishing',
'Q42_14',#: 'lfstl_like_hiking',
'Q42_15',#: 'lfstl_like_out_of_doors',
'Q42_16',#: 'lfstl_cabin_by_quiet_lake_spend_summer',
'Q42_17',#: 'lfstl_good_fixing_mechanical_things',
'Q42_18',#: 'lfstl_repair_my_own_car',
'Q42_19',#: 'lfstl_like_war_stories',
'Q42_20',#: 'lfstl_do_better_than_avg_fist_fight',
'Q42_21',#: 'lfstl_would_want_to_be_prof_football_player',
'Q42_22',#: 'lfstl_would_like_to_be_policeman',
'Q42_23',#: 'lfstl_too_much_violence_on_tv',
'Q42_24',#: 'lfstl_should_be_gun_in_every_home',
'Q42_25',#: 'lfstl_like_danger',
'Q42_26',#: 'lfstl_would_like_my_own_airplane',
'Q42_27',#: 'lfstl_like_to_play_poker',
'Q42_28',#: 'lfstl_smoke_too_much',
'Q42_29',#: 'lfstl_love_to_eat',
'Q42_30',#: 'lfstl_spend_money_on_myself_that_shuld_spend_on_family',
'Q42_31',#: 'lfstl_if_given_chance_men_would_cheat_on_spouses',
'Q42_33',#: 'lfstl_satisfied_with_life',
'Q42_34',#: 'lfstl_like_to_be_in_charge',
'Q42_35',#: 'lfstl_enjoy_shopping',
'Q42_36',#: 'lfstl_plan_spending_carefully',
'Q42_37',#: 'lfstl_obey_rules',
],
'food_habits_and_attitudes' : [
'Q43_1',#: 'lfstl_satisfied_with_weight',
'Q43_4',#: 'lfstl_regular_exercise_routine',
'Q43_5',#: 'lfstl_grew_up_eating_healthy_foods',
'Q43_7',#: 'lfstl_hard_to_be_disciplined_about_what_i_eat',
'Q43_9',#: 'lfstl_dont_have_to_worry_how_i_eat',
'Q43_11',#: 'lfstl_never_think_healthy_unhealthy_food',
'Q43_13',#: 'lfstl_stick_to_healthy_diet_for_family',
'Q43_14',#: 'lfstl_choose_snack_foods_that_give_vitamins_minerals',
'Q44_1',#: 'lfstl_often_prepare_sauces_dips_from_scratch',
'Q44_5',#: 'lfstl_dont_have_much_interest_cooking',
'Q44_6',#: 'lfstl_seek_out_healthy_foods',
'Q44_8',#: 'lfstl_read_ingreadients_list_on_the_label',
'Q44_9',#: 'lfstl_looking_for_new_products_when_at_grocery_store',
'Q44_11',#: 'lfstl_lower_priced_products_same_as_higher_priced',
'Q44_13',#: 'lfstl_look_for_authentic_ingredients_flavors',
'Q44_14',#: 'lfstl_like_ethnic_foods',
'Q44_15',#: 'lfstl_daring_adventurous_trying_new_foods',
'Q47',#:'pay_organic',
],
'emotional_state' : [
'Q50_1',#:'em_happiness',
'Q50_2',#:'em_stress',
'Q50_3',#:'em_loneliness',
'Q50_4',#:'em_jealousy',
'Q50_5',#:'em_fear',
'Q50_6',#:'em_hopefulness',
'Q50_7',#:'em_regret',
'Q50_8',#:'em_optimism',
'Q50_9',#:'em_contentness',
'Q50_10',#:'em_gratitude',
'Q50_11',#:'em_guilt',
'Q50_12',#:'em_anger',
'Q50_13',#:'em_joy',
'Q50_14',#:'em_contempt',
'Q50_15',#:'em_disgust',
'Q50_16',#:'em_sadness',
'Q50_17',#:'em_surprise',
'Q50_18',#:'em_vulnerability',
'Q50_19',#:'em_curiosity',
'Q50_20',#:'em_warmth'
],
'values_and_beliefs' : [
'Q26',#:'global_warming',
'Q27',#:'recycling',
'Q28',#:'religious',
'Q29',#:'offensive_ads_banned',
'Q30',#:'offensive_ads_brand',
'Q32',#:'NRA_support',
'Q31_0',#: 'facebook_evil_0',
'Q31_1',#: 'facebook_evil_1',
'Q31_2',#: 'facebook_evil_2',
'Q34',#:'bin_family_career',
'Q35',#:'bin_friendship_laws',
'Q36',#:'bin_freedom_truth',
'Q37',#:'bin_pleasure_duty',
'Q38',#:'bin_wealth_fame',
'Q39',#:'bin_politeness_honesty',
'Q40',#:'bin_beautiful_smart',
'Q41',#:'bin_belonging_independence',
],
'price_sensitivity' : [
'Q94_1',# : 'price_bicycle',
'Q94_4',# : 'price_smartphone',
'Q94_5',# : 'price_laptop',
'Q94_6',# : 'price_jeans',
'Q94_7',# : 'price_sneakers',
'Q94_8',# : 'price_microwave',
'Q94_9',# : 'price_washing_machine',
'Q94_10',# : 'price_office_chair',
],
'breakfast_food_choice' : [
'Q45_42',#: 'brkfst_none',
'Q45_43',#: 'brkfst_bar',
'Q45_44',#: 'brkfst_fruit',
'Q45_45',#: 'brkfst_nuts',
'Q45_46',#: 'brkfst_regular_yogurt',
'Q45_47',#: 'brkfst_greek_yogurt',
'Q45_48',#: 'brkfst_muffin_croissant',
'Q45_49',#: 'brkfst_cold_cereal',
'Q45_50',#: 'brkfst_hot_cereal_oatmeal',
'Q45_51',#: 'brkfst_frozen_waffle',
'Q45_52',#: 'brkfst_cheese_cottage_cheese',
'Q45_53',#: 'brkfst_sandwhich',
'Q45_54',#: 'brkfst_salad',
'Q45_55',#: 'brkfst_eggs',
'Q45_56',#: 'brkfst_meat',
'Q45_57',#: 'brkfst_chicken',
'Q45_58',#: 'brkfst_fish',
'Q45_59',#: 'brkfst_potatoes',
'Q45_60',#: 'brkfst_vegetables',
'Q45_61',#: 'brkfst_soup',
'Q45_62',#: 'brkfst_pasta',
'Q45_63',#: 'brkfst_hummus',
'Q45_64',#: 'brkfst_bread_toast',
'Q45_65',#: 'brkfst_bagel_roll',
'Q45_66',#: 'brkfst_chocolate_candy',
'Q45_67',#: 'brkfst_cake_cookies',
'Q45_68',#: 'brkfst_chips',
'Q45_69',#: 'brkfst_crackers',
'Q45_70',#: 'brkfst_pretzels',
'Q45_71',#: 'brkfst_smoothie',
'Q45_72',#: 'brkfst_pastry_buns_fruit_pies',
'Q45_73',#: 'brkfst_brownies_snack_cakes',
'Q45_74',#: 'brkfst_popcorn',
'Q45_75',#: 'brkfst_ice_cream_sorbet',
'Q45_76',#: 'brkfst_pudding_gelatin',
'Q45_77',#: 'brkfst_refrig_dip_salsa_guacamole_dairy',
],
'breakfast_motivations' : [
'Q46_1',#: 'rsn_brkfst_gives_energy',
'Q46_4',#: 'rsn_brkfst_tide_over_next_meal',
'Q46_5',#: 'rsn_brkfst_great_taste',
'Q46_6',#: 'rsn_brkfst_satisfies_craving',
'Q46_7',#: 'rsn_brkfst_comforting_soothing',
'Q46_8',#: 'rsn_brkfst_healthy_good_guilt_free',
'Q46_9',#: 'rsn_brkfst_take_care_of_hunger_filling',
'Q46_10',#: 'rsn_brkfst_not_too_filling',
'Q46_11',#: 'rsn_brkfst_fits_with_who_i_am',
'Q46_12',#: 'rsn_brkfst_helps_relax_reduce_stress',
'Q46_13',#: 'rsn_brkfst_helps_control_weight',
'Q46_14',#: 'rsn_brkfst_helps_maintain_mental_focus',
'Q46_15',#: 'rsn_brkfst_keeps_from_overeating_next_meal',
'Q46_16',#: 'rsn_brkfst_great_texture',
'Q46_17',#: 'rsn_brkfst_sweet_taste',
'Q46_18',#: 'rsn_brkfst_tangy_savory_taste',
'Q46_19',#: 'rsn_brkfst_chunky_multidim_texture',
'Q46_20',#: 'rsn_brkfst_smooth_creamy_texture',
'Q46_21',#: 'rsn_brkfst_gives_protein',
'Q46_22',#: 'rsn_brkfst_keeps_me_going',
'Q46_23',#: 'rsn_brkfst_good_food_to_eat_with_others',
'Q46_24',#: 'rsn_brkfst_keeps_me_on_track',
'Q46_25',#: 'rsn_brkfst_like_ingredients',
'Q46_26',#: 'rsn_brkfst_refreshing_taste',
],
'product_preferences' : [
'Q73',#:'bin_iphone_galaxy',
'Q74',#:'bin_clothing_tech',
'Q75',#:'bin_brand_recogn_not',
'Q76',#:'bin_chocolate_strawberry',
'Q77',#:'bin_coke_original_diet',
'Q78',#:'bin_coke_pepsi',
'Q79',#:'bin_club_book',
'Q80',#:'bin_beach_mountain',
'Q81',#:'bin_story_tell_listen',
'Q82',#:'bin_capitalism_socialism',
'Q83',#:'bin_children_not',
'Q84',#:'bin_thinking_acting',
'Q85',#:'bin_planning_spontaneity',
'Q86',#:'bin_trump_hillary',
'Q87',#:'bin_madonna_lady_gaga',
'Q88',#:'bin_beatles_michael_jackson',
],
'online_service_usage' : [
'Q57_1',#:'use_facebook',
'Q57_2',#:'use_twitter',
'Q57_3',#:'use_netflix',
'Q57_4',#:'use_spotify',
'Q57_5',#:'use_apple_music',
'Q57_6',#:'use_tinder',
'Q57_7',#:'use_pandora',
'Q57_9',#:'use_amazon',
'Q57_11',#:'use_saks',
'Q57_13',#:'use_dropbox',
'Q57_14',#:'use_gmail',
'Q57_15',#:'use_hotmail',
'Q57_16',#:'use_yahoo',
'Q57_18',#:'use_github',
'Q57_20',#:'use_shazam',
'Q57_21',#:'use_snapchat',
'Q57_22',#:'use_whatsapp',
'Q57_23',#:'use_instagram',
'Q57_24',#:'use_telegram',
'Q57_27',#:'use_hulu',
'Q57_30',#:'use_bloomingdales',
'Q57_31',#:'use_NYT',
'Q57_32',#:'use_WSJ',
],
'browser' : [
'Q6_1_TEXT_0', #: 'Browser: Safari iPhone',
'Q6_1_TEXT_1', #: 'Browser: Chrome',
'Q6_1_TEXT_2', #: 'Browser: Other',
],
'media_source' : [
'Q71#1_1',# : 'active_consumer_google_news',
'Q71#1_2',# : 'active_consumer_yahoo_news',
'Q71#1_3',# : 'active_consumer_new_york_times',
'Q71#1_4',# : 'active_consumer_wsj',
'Q71#1_5',# : 'active_consumer_boston_globe',
'Q71#1_6',# : 'active_consumer_cnn',
'Q71#1_7',# : 'active_consumer_huffpost',
'Q71#1_8',# : 'active_consumer_foxnews',
'Q71#1_10',# : 'active_consumer_vice',
'Q71#1_11',# : 'active_consumer_chicago_tribune',
'Q71#1_12',# : 'active_consumer_breitbart',
'Q71#1_14',# : 'active_consumer_washington_post',
'Q71#1_16',# : 'active_consumer_bbc_news',
'Q71#1_17',# : 'active_consumer_facebook',
'Q71#1_19',# : 'active_consumer_twitter',
],
'media_trust' : [
'Q71#2_1',# : 'bias_google_news',
'Q71#2_2',# : 'bias_yahoo_news',
'Q71#2_3',# : 'bias_new_york_times',
'Q71#2_4',# : 'bias_wsj',
'Q71#2_5',# : 'bias_boston_globe',
'Q71#2_6',# : 'bias_cnn',
'Q71#2_7',# : 'bias_huffpost',
'Q71#2_8',# : 'bias_foxnews',
'Q71#2_10',# : 'bias_vice',
'Q71#2_11',# : 'bias_chicago_tribune',
'Q71#2_12',# : 'bias_breitbart',
'Q71#2_14',# : 'bias_washington_post',
'Q71#2_16',# : 'bias_bbc_news',
'Q71#2_17',# : 'bias_facebook',
'Q71#2_19',# : 'bias_twitter',
'Q64_1',#:'TV_news_trust',
'Q64_2',#:'Internet_news_trust',
],
'economic_outlook' : [
'Q89',#:'ec_past_fin_better',
'Q90',#:'ec_fut_fin_better',
'Q91',#:'ec_good_times',
'Q92',#:'ec_depression',
],
'spend_intentions' :[
'Q93',#:'ec_buy',
'Q95_1',# : 'spend_savings_emergencies',
'Q95_3',# : 'spend_necessities_bills',
'Q95_4',# : 'spend_entertainment_gift_loved_one',
'Q62', #: 'netflix_intend_to_get',
],
'media_consumption_intensity' : [
'Q65',#:'track_news_daily',
'Q68',#:'social_media_time',
'Q69',#:'social_media_posting',
'Q67_0',#: 'sports_programming_0',
'Q67_1',#: 'sports_programming_1',
'Q67_2',#: 'sports_programming_2',
'Q70_0',#: 'video_watching_0',
'Q70_1',#: 'video_watching_1',
'Q70_2',#: 'video_watching_2',
'Q59', #: 'netflix_frequent_viewer',
'Q60', #: 'netflix_binger',
],
'follower_characteristics' : [
'Q63',#:'superbowl',
'Q66',#:'read_reviews',
'Q55',#:'rec_lik_follow'
'Q54',#:'rec_lik_ask',
],
'influencer_characteristics' : [
'Q52_1',#:'post_lik_pos',
'Q52_2',#:'post_lik_neg',
'Q53',#:'movie_activ_rec',
'Q51',#:'entertain_freq'
'Q61', # : 'netflix_active_recommender',
],
}
'''
meta_groups contains labels for the buckets of the variable groups
'''
meta_groups = [
('Demographics', '', 'Biological characteristics', 'demographics_biological'),
('Demographics', '', 'Socio-economic status', 'demographics_socio_economic'),
('General psychographics', '', 'Values and beliefs', 'values_and_beliefs'),
('General psychographics', '', 'Big 5 personalities', 'personality'),
('General psychographics', '', 'Regularly felt emotions', 'emotional_state'),
('General psychographics', '', 'Character and ethical choices', 'character_ethics'),
('General psychographics', '', 'Lifestyle', 'lifestyle'),
('Consumer psychographics', 'Products and services', 'Product preferences', 'product_preferences'),
('Consumer psychographics', 'Products and services', 'Online service use', 'online_service_usage'),
('Consumer psychographics', 'Products and services', 'Browser', 'browser'),
('Consumer psychographics', 'Media', 'Media choice', 'media_source'),
('Consumer psychographics', 'Media', 'Media consumption intensity', 'media_consumption_intensity'),
('Consumer psychographics', 'Media', 'Media trust', 'media_trust'),
('Consumer psychographics', 'Influence', 'Influencer characteristics', 'influencer_characteristics'),
('Consumer psychographics', 'Influence', 'Follower characteristics', 'follower_characteristics'),
('Consumer psychographics', 'Economics', 'Spend intentions', 'spend_intentions'),
('Consumer psychographics', 'Economics', 'Price sensitivity', 'price_sensitivity'),
('Consumer psychographics', 'Economics', 'Economic outlook', 'economic_outlook'),
('Consumer psychographics', 'Food', 'Food habits and attitudes', 'food_habits_and_attitudes'),
('Consumer psychographics', 'Food', 'Breakfast food choice', 'breakfast_food_choice'),
('Consumer psychographics', 'Food', 'Breakfast food choice motivations', 'breakfast_motivations'),
]
meta_groups = pd.DataFrame(meta_groups)
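# Illustrative lookup sketch (not part of the original source). It shows how the
# structures above fit together: var_groups lists the question codes in a bucket,
# q_to_full_name_dict gives their plot-ready names, and each meta_groups row is
# (bucket, sub-bucket, display name, var_groups key). The helper is never called here.
def _describe_group(group_key='spend_intentions'):
    return [(q, q_to_full_name_dict.get(q, q)) for q in var_groups[group_key]]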
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from string import ascii_letters
import time
import os
import sys
training_data_path = './data/widsdatathon2020/training_v2.csv'
data_types_path = './data/widsdatathon2020/WiDS Datathon 2020 Dictionary.csv'
training_data = pd.read_csv(training_data_path)
print('-'*40)
print('Started generating data_types-file')
N_variables = 40
if(len(sys.argv) > 1):
N_variables = int(sys.argv[1])
else:
print('-'*40)
print('No argument given for number of variables, using default value')
print('-'*40)
print('Number of variables used: {}'.format(N_variables))
print('-'*40)
training_data = training_data.iloc[:, :N_variables]
training_data = training_data.reindex(sorted(training_data.columns), axis=1)
def createMaskCSV(data):
    """Return the (row, column) coordinates of missing values and the data with NaNs filled with 0."""
    mask = data.isna()
    data = data.fillna(0)
L = mask.shape[0]
rows = []
cols = []
for i in range(mask.shape[0]):
start_time = time.time()
row = mask.to_numpy()[i]
for j, val in enumerate(row):
if val:
rows.append(i)
cols.append(j)
# print("{0} out of {1} took: {2}".format(
# i, L, time.time() - start_time))
dic = {'row': rows, 'column': cols}
mask_df = pd.DataFrame(dic)
return mask_df, data
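# Vectorized alternative (not part of the original source): produces the same
# (row, column) table as createMaskCSV but lets NumPy locate the missing cells
# instead of looping over every row in Python. Defined here only as a sketch.
def createMaskCSV_vectorized(data):
    rows, cols = np.nonzero(data.isna().to_numpy())
    mask_df = pd.DataFrame({'row': rows, 'column': cols})
    return mask_df, data.fillna(0)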
mask_df, training_data = createMaskCSV(training_data)
training_data_preprocessed_path = './data/training_data_preprocessed.csv'
################################################################################
# Replacing object datatype with category in training data
################################################################################
training_data_check = training_data
# Select object columns
cat_columns = training_data_check.select_dtypes(['object']).columns
# Cast to category
training_data_check[cat_columns] = training_data_check[cat_columns].astype(
'category')
# Replace with integer coding
training_data_check[cat_columns] = training_data_check[cat_columns].apply(
lambda x: x.cat.codes)
# Overwrite
training_data_preprocessed_path = './data/training_data_preprocessed_cleaned.csv'
# training_data_check.to_csv(
# training_data_preprocessed_path, index=False, header=True)
############################
# Creating data_types-file
############################
data_types_path = './data/widsdatathon2020/WiDS Datathon 2020 Dictionary.csv'
data_types_raw = pd.read_csv(data_types_path)
data_types_raw = data_types_raw.sort_values('Data Type')
variable_names = data_types_raw['Variable Name']
column_names = training_data.columns
# Numerical values
count = 'count'
positive = 'pos'
real = 'real'
# Nominal values
categorical = 'cat'
ordinal = 'ordin'
variable_names = np.intersect1d(variable_names, column_names)
minimums = training_data.select_dtypes(include='float').describe().iloc[3, :]  # row 3 of describe() is the per-column minimum
only_real = 'pre_icu_los_days'
print('Selected variables')
print(variable_names)
# minimums
size = list(range(len(variable_names)))
data = {'name': variable_names, 'type': size, 'dim': size, 'nclass': size}
data_types_with_name = pd.DataFrame(data)
import numpy as np
import os
import pandas as pd
import joblib
from glob import glob
filepath = '/home/jungkap/Documents/datathon/arrhythmia_feature.pkl'
features = joblib.load(filepath)
for i in range(1, 6):
filepath = '/home/jungkap/Documents/datathon/arrhythmia_feature{}.pkl'.format(i)
tmp = joblib.load(filepath)
for k, v in tmp.items():
features[k] = v
cols = ['CaseID', 'AF_rhythm', 'AFIB_rhythm', 'NORMAL_rhythm', 'APC_beat', 'LBB_beat', 'NORMAL_beat', 'PAB_beat', 'PVC_beat', 'RBB_beat']
df = pd.DataFrame(features).T
df = df.reset_index()
df.columns=cols
extra_df = pd.read_csv('/home/jungkap/Documents/datathon/extra_feature.csv')
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
new_trading_df = new_trading_df.dropna(axis='index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
    raise RuntimeError(
        "Caught exception when retrieving data from Yahoo...") from ex
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
the news headlines from the last two days (yesterday and today).
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
soup = BeautifulSoup(texts, "html.parser")
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
current_app.logger.info("Exception in scrape Finviz.", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
def prepare_trading_dataset(df):
"""Prepare the trading data set.
Time series analysis incorporates previous data for future prediction,
We need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
Note: the feature windows are fixed inside the function; rolling features
use at most 400 trading days (about 80 weeks) of history.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
raise RuntimeError(
    "Encountered error in >>make_dataset.prepare_trading_dataset<<: "
    "received an empty trading DataFrame.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).sum()
df['log_ret_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).sum()
df['log_ret_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).sum()
df['log_ret_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).sum()
df['log_ret_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).sum()
df['log_ret_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).sum()
df['vol_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).std()*np.sqrt(5)
df['vol_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).std()*np.sqrt(10)
df['vol_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).std()*np.sqrt(15)
df['vol_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).std()*np.sqrt(20)
df['vol_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).std()*np.sqrt(40)
df['vol_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).std()*np.sqrt(60)
df['vol_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).std()*np.sqrt(80)
df['vol_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).std()*np.sqrt(100)
df['vol_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).std()*np.sqrt(120)
df['vol_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).std()*np.sqrt(140)
df['vol_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).std()*np.sqrt(160)
df['vol_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).std()*np.sqrt(180)
df['vol_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).std()*np.sqrt(200)
df['vol_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).std()*np.sqrt(220)
df['vol_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).std()*np.sqrt(240)
df['vol_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).std()*np.sqrt(260)
df['vol_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).std()*np.sqrt(280)
df['vol_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).std()*np.sqrt(300)
df['vol_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).std()*np.sqrt(320)
df['vol_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).std()*np.sqrt(340)
df['vol_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).std()*np.sqrt(360)
df['vol_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).std()*np.sqrt(380)
df['vol_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).std()*np.sqrt(400)
df['volume_1w'] = pd.Series(df['intraday_volumes'])
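# Sketch (not part of the original source): the long run of near-identical
# rolling-window assignments above could be generated in a loop. Window sizes
# mirror the hard-coded ones (1-4 weeks, then every 4 weeks up to 80 weeks),
# with 5 trading days per week:
# weeks = [1, 2, 3, 4] + list(range(8, 84, 4))
# for w in weeks:
#     win = 5 * w
#     df['log_ret_{}w'.format(w)] = df['log_ret_1d'].rolling(window=win).sum()
#     df['vol_{}w'.format(w)] = df['log_ret_1d'].rolling(window=win).std() * np.sqrt(win)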
import dask.dataframe as dd
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
def test_create_entity_from_dask_df(pd_es):
dask_es = EntitySet(id="dask_es")
log_dask = dd.from_pandas(pd_es["log"].df, npartitions=2)
dask_es = dask_es.entity_from_dataframe(
entity_id="log_dask",
dataframe=log_dask,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(pd_es["log"].df, dask_es["log_dask"].df.compute(), check_like=True)
def test_create_entity_with_non_numeric_index(pd_es, dask_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), dask_es['new_entity'].df.compute())
def test_create_entityset_with_mixed_dataframe_types(pd_es, dask_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
    # Test error is raised when trying to add a Dask entity to an entityset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(dask_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id")
    # Test error is raised when trying to add a pandas entity to an entityset with existing dask entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(dask_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
dask_es = EntitySet(id="dask_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [pd.to_datetime('2019-01-10'),
| pd.to_datetime('2019-02-03') | pandas.to_datetime |
import anndata as ad
import logging
import numpy as np
import os
import time
import pandas as pd
import yaml
from pathlib import Path
from collections import namedtuple
from const import PATH, OUT_PATH
#logging.basicConfig(level=logging.INFO)
try:
import git
except:
pass
def get_tasks(phase):
assert phase in ['phase1v2','phase2']
tasks = [
"GEX2ADT",
"ADT2GEX",
"GEX2ATAC",
"ATAC2GEX"
]
task2name = {
"ADT2GEX":f"openproblems_bmmc_cite_{phase}_mod2",
"GEX2ADT":f"openproblems_bmmc_cite_{phase}_rna",
"ATAC2GEX":f"openproblems_bmmc_multiome_{phase}_mod2",
"GEX2ATAC":f"openproblems_bmmc_multiome_{phase}_rna"
}
return tasks, task2name
def get_y_dim(data_path):
if '_cite_' in data_path:
if 'mod2' in data_path:
return 13953,"ADT2GEX"
elif 'rna' in data_path:
return 134,"GEX2ADT"
else:
assert 0
elif '_multiome_' in data_path:
if 'mod2' in data_path:
return 13431,"ATAC2GEX"
elif 'rna' in data_path:
return 10000,"GEX2ATAC"
else:
assert 0
def get_par(path,phase):
par = {
"input_solution" : f"{path}/datasets_{phase}/predict_modality",
"input_prediction" : f"{path}/predictions/predict_modality",
}
return par
def get_train_test_paths(name,phase,path = "./output"):
par = get_par(path,phase)
train_mod1 = f"{par['input_solution']}/{name}/{name}.censor_dataset.output_train_mod1.h5ad"
train_mod2 = train_mod1.replace('mod1','mod2')
test_mod1 = train_mod1.replace('train','test')
test_mod2 = test_mod1.replace('mod1','mod2')
assert os.path.exists(train_mod1) and os.path.exists(train_mod2)
if phase == 'phase1v2':
assert os.path.exists(test_mod1) and os.path.exists(test_mod2)
return train_mod1,train_mod2,test_mod1,test_mod2
def get_data_paths(task,phase,data_type='train_test',path='./output'):
assert data_type in ['train_test','gt_pred']
tasks, task2name = get_tasks(phase)
name = task2name[task]
if data_type == 'train_test':
return get_train_test_paths(name,phase,path)
else:
return get_gt_pred_paths(name,path)
def get_gt_pred_paths(name,path = "./output"):
par = get_par(path,'phase1v2')
gt = f"{par['input_solution']}/{name}/{name}.censor_dataset.output_test_mod2.h5ad"
pred = f"{par['input_prediction']}/{name}/{name}.method.output.h5ad"
print(gt)
print(pred)
assert os.path.exists(gt) and os.path.exists(pred)
return gt, pred
def eval_one_file(name):
gt, pred = get_gt_pred_paths(name)
logging.info("Reading solution file")
ad_sol = ad.read_h5ad(gt)
logging.info("Reading prediction file")
ad_pred = ad.read_h5ad(pred)
logging.info("Check prediction format")
if ad_sol.uns["dataset_id"] != ad_pred.uns["dataset_id"]:
raise ValueError("Prediction and solution have differing dataset_ids")
if ad_sol.shape != ad_pred.shape:
raise ValueError("Dataset and prediction anndata objects should have the same shape / dimensions.")
logging.info("Computing MSE metrics")
tmp = ad_sol.X - ad_pred.X
rmse = np.sqrt(tmp.power(2).mean())
mae = np.abs(tmp).mean()
return rmse
def eval_all():
start = time.time()
tasks, task2name = get_tasks(phase='phase1v2')
s = 0
res = {}
for task in tasks:
name = task2name[task]
score = eval_one_file(name)
s += score
res[task] = score
res['overall'] = s/len(tasks)
print_res(res)
duration = time.time() - start
logging.critical(f" Total time: {duration:.1f} seconds")
def print_res(res):
for i,j in res.items():
logging.critical(f" {i} {j:.4f}")
def check_column_mean_var_all(path='./output',phase='phase2'):
tasks, task2name = get_tasks(phase=phase)
if phase == 'phase2':
names = ['train_mod1', 'train_mod2']
else:
names = ['train_mod1', 'train_mod2', 'test_mod1', 'test_mod2']
logging.info("[min, max, mean]")
res = []
ms = []
ns = []
for task in tasks:
data_names = get_data_paths(task,phase=phase,path=path)
logging.info(f"task:{task}")
for d,n in zip(data_names, names):
logging.info(n)
data = ad.read_h5ad(d)
msg,dd = check_column_mean_var(data)
logging.info('\n'+msg)
res.append(dd)
ms.append(task)
ns.append(n)
dg = | pd.DataFrame({'task':ms,'type':ns}) | pandas.DataFrame |
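# check_column_mean_var is called above but its definition is not part of this
# excerpt. The sketch below is purely an assumption of what such a helper could
# look like, based on the "[min, max, mean]" log line and the (msg, dd) return
# values used at the call site; it is not the original implementation.
import numpy as np
import pandas as pd

def check_column_mean_var_sketch(adata):
    """Summarize per-column mean/variance of an AnnData matrix as (msg, DataFrame)."""
    X = adata.X
    X = X.toarray() if hasattr(X, "toarray") else np.asarray(X)  # densify sparse matrices
    col_mean = X.mean(axis=0)
    col_var = X.var(axis=0)
    dd = pd.DataFrame(
        {
            "mean": [col_mean.min(), col_mean.max(), col_mean.mean()],
            "var": [col_var.min(), col_var.max(), col_var.mean()],
        },
        index=["min", "max", "mean"],
    ).T
    msg = dd.to_string()
    return msg, dd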
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
| pd.offsets.Second(5) | pandas.offsets.Second |
# Steinbeck.py is a Python program designed specifically for
# pulling time series data from the Johns Hopkins University
# COVID-19 GitHub and turning it into usable time series
# CSVs for analysis.
# @author <NAME>, <EMAIL>
# This program written and produced for and by Cloud Brigade
import pandas as pd
import numpy as np
import datetime
import boto3
from io import StringIO
BUCKETNAME = 'covid-v1-part-3-data-bucket'
def timify(data):
'''
    Returns a usable national dataframe built from the JHU time series data set;
    the second item returned is a dataframe with the population of each county.
    data is a pandas dataframe.
'''
# create a county_state column
df = data.copy()
county_state = df['Admin2'] + " County, " + df['Province_State']
county_state = | pd.DataFrame(county_state) | pandas.DataFrame |
import xml.etree.ElementTree as ET
import mysql.connector
import pandas as pd
import time
from datetime import datetime, timezone, timedelta
class OperaDB2:
def __init__(self,file,dbname):
_host, _port, db, s3, user, pa = self.getDB_XML(file,dbname)
print('connect to opera2 server')
self.conn = mysql.connector.connect(
user=user,
password=pa,
host=_host,
port=_port,
database=db)
self.cur = self.conn.cursor()
self.s3dir = s3
self.triptable = 'data_logs'
self.numSensor = {'meidai':5, 'aioi':3, 'arc':3 }
def getDB_XML(self,file,dbname):
db_tree = ET.ElementTree(file=file)
db_root = db_tree.getroot()
for db_info in db_root.findall('.//'+dbname):
host = db_info.find('host').text
port = db_info.find('port').text
sql = db_info.find('sql').text
s3 = db_info.find('s3').text
user = db_info.find('user').text
_pass = db_info.find('pass').text
return host, port, sql, s3, user, _pass
def exe_query(self, query):
#print( query )
try:
self.cur.execute(query)
        except Exception as e:
            # roll back the failed transaction and report the error
            self.conn.rollback()
            print(e)
def get_DataFrame(self, query):
self.exe_query( query )
column_names = [desc[0] for desc in self.cur.description]
        # build the frame directly from the fetched rows
        # (row-by-row DataFrame.append is deprecated and slow)
        df = pd.DataFrame(self.cur.fetchall(), columns=column_names)
return column_names, df
def get_TripListFromTime(self, day, tstart='00:00:00.0000', duration='23:59:59.0000'):
desire_datetime = day + ' ' + tstart
desire_length = | pd.Timedelta(duration) | pandas.Timedelta |
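# A hedged usage sketch for the OperaDB2 class above. The XML file name, the
# database section name and the query are illustrative placeholders only; the
# XML layout must match what getDB_XML expects (host/port/sql/s3/user/pass tags).
if __name__ == "__main__":
    db = OperaDB2("db_config.xml", "opera2")          # hypothetical config file and section
    cols, trips = db.get_DataFrame(f"SELECT * FROM {db.triptable} LIMIT 10")
    print(cols)
    print(trips.head())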
#merge this and mack once we decide we want to use this or not
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import datetime
from kivy.app import App
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty
from kivy.lang import Builder
from kivy.core.window import Window
# import pandas
import nltk
from pandas import DataFrame, Series, read_csv, read_pickle
from re import sub
from nltk.stem import wordnet
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk import pos_tag
from sklearn.metrics import pairwise_distances
from nltk import word_tokenize
from nltk.corpus import stopwords
from clean_master_data import DataCleaner
# This is for the autocorrect functionality
from textblob import TextBlob
# This is for the Named Entity Recognition functionality
import spacy
import en_core_web_sm
from random import randint
print("Starting Laur...")
#import laur_ai as LaurAI
def time_now():
return "[" + datetime.datetime.now().strftime("%H:%M:%S") + "]"
#Kivy kv-language layout defining the GUI widgets and their styling
root_widget = Builder.load_string('''
<ScrollableLabel>:
#Specifics of Scrollable Label
text: app.text
Label:
text: root.text
font_size: 14
text_size: self.width, None
color: [0,0,0,1]
markup: True
size_hint_y: None
pos_hint: {"left":1, "top":1}
height: self.texture_size[1]
valign: 'top'
halign: 'left'
scroll_y: None
padding_x: 7
padding_y: 7
<BoxLayout>
<RootWidget>:
#Background Set
BoxLayout:
size: root.size
pos: 0,0
canvas.before:
Rectangle:
pos: self.pos
size: self.size
source: 'BackImage.jpg'
#Conversation Box
BoxLayout:
orientation: 'vertical'
padding: 20
spacing: 10
BoxLayout:
size_hint: None, None
size: root.width * 0.35, root.height - 100
canvas.before:
Color:
rgba: .55,.23,.23,.05
Color:
rgba: .55,.23,.23,.05
Rectangle:
pos: self.pos
size: root.width * 0.35,root.height - 100
ScrollableLabel:
id: laur_output
markup: True
#Bottom Bar
BoxLayout:
orientation: 'horizontal'
spacing: 10
size_hint_y: .1
TextInput:
id: txt_input
background_color: [.73,.83,.93,.2]
foreground_color: [0,0,0,1]
cursor_color: [.19,.39,.65,1]
size_hint_x: .8
#multiline: False
write_tab: False
hint_text: "Send me a message"
Button:
id: btn
text: 'Send'
font_size: 20
bold: True
size_hint_x: .2
background_color: [.19,.39,.65,.9]
color: [.97,.97,.97,.9]
on_press: app.runStuff(txt_input.text)
on_release: app.read()
on_release: txt_input.text=""
''')
print("Laur Started")
class RootWidget(BoxLayout):
pass
class ScrollableLabel(ScrollView):
pass
class Laur_AI(App):
text = StringProperty('')
    #Initialize the conversation file to write to and read from / start the conversation
def __init__(self, data, use_cleaned_data=True, **kwargs):
super().__init__(**kwargs)
Window.bind(on_key_down=self._on_keyboard_down)
Window.bind(on_key_up=self._on_keyboard_up)
with open('Conversation.txt', 'w') as f:
f.write('[b]' + time_now() + " Laur:[/b] HI! My name is Laur and I am a chatbot! Let's talk! I am pulling from r/CasualConversations." + '\n')
f.write('[b]' + time_now() + " Laur:[/b] To talk please type a message and click the 'Send' button. To stop talking and close the application type 'bye'" + '\n')
f.close()
with open('Conversation.txt', 'r') as f:
contents = f.read()
self.text = contents
self.data = data[["comment", "response"]]
self.data_cleaner = DataCleaner()
self.cleaned_data = DataFrame(columns=["Question", "Answer"])
# use data if provided
if use_cleaned_data:
self.cleaned_data = read_pickle("data/master_data_cleaned.pkl")
if len(self.cleaned_data) != len(self.data):
# if the data does not match, retrain
self.cleaned_data = self.data_cleaner.clean_data(self.data)
# to improve speed, save to master cleaned
self.cleaned_data.to_pickle("data/master_data_cleaned.pkl", protocol=4)
self.finalText = DataFrame(columns=["Lemmas"])
self.c = CountVectorizer()
self.bag = None
#Make it so on keyboard enter it runs and clears text of text box
def _on_keyboard_down(self, instance, keyboard, keycode, text, modifiers):
if keycode == 40: # enter
app.runStuff(self.root.ids.txt_input.text)
def _on_keyboard_up(self,instance, keyboard, keycode):
if keycode == 40: # enter
app.read()
self.root.ids.txt_input.text=""
#Handles user input and prints to screen
def runStuff(self, input):
try:
#split sentences up into parts
#userInput = re.split('[\.!?]', input.lower().rstrip('.!?'))
full_reply = ' '
#print("got here")
if input == "bye":
app.get_running_app().stop()
#makes call to tree to get response
#print("stuck getting response")
response = app.askQuestion(input.lower())
full_reply += response + ' '
#print("got a response")
with open('Conversation.txt', 'a') as f:
f.write('[b]' + time_now() + ' User:[/b] ' + input + '\n')
f.write('[b]' + time_now() + ' Laur:[/b]' + str(full_reply) + '\n')
f.close()
except:
pass
#Reads text from Conversation.txt to screen
def read(self):
with open('Conversation.txt', 'r') as f:
contents = f.read()
self.text = contents
def build(self):
return RootWidget()
def clean_line(self, line):
'''
Clean the line
        This makes the line all lowercase and removes anything that isn't a lowercase letter or a space
'''
return sub(r'[^a-z ]', '', str(line).lower())
def tokenize_and_tag_line(self, line):
''' Tokenizes the words then tags the tokenized words '''
return pos_tag(word_tokenize(line), None)
def create_lemma_line(self, input_line):
''' We create the lemmatizer object '''
lemma = wordnet.WordNetLemmatizer()
# This is an array for the current line that we will append values to
line = []
for token, ttype in input_line:
checks = ["a", "v", "r", "n"]
if(ttype[0].lower() not in checks):
ttype = "n"
line.append(lemma.lemmatize(token, ttype[0].lower()))
return {"Lemmas": " ".join(line)}
def create_lemma(self):
        ''' Creates lemmas for the cleaned data (a lemma is the base form of a word) '''
lemmas = []
for j in self.cleaned_data.iterrows():
lemmas.append(self.create_lemma_line(j[1][0]))
self.finalText = self.finalText.append(lemmas)
def create_bag_of_words(self):
'''
create a bag of words and save in a dataframe with the same indicies as
the master data
'''
self.bag = DataFrame(self.c.fit_transform(self.finalText["Lemmas"]).toarray(),
columns=self.c.get_feature_names(), index=self.data.index)
def askQuestion(self, context):
'''
@param question: a string context given by the user
output a string response to context
---
Compute most similar context to the input using semisupervised learning
and return approproate response to the determined most similar context
'''
# correct the given input
context = self.autocorrect(context)
# Removes all "stop words"
valid_words = []
for i in context.split():
if i not in stopwords.words("english"):
valid_words.append(i)
# Clean the data and get tokenized and tagged data
valid_sentence = self.tokenize_and_tag_line(self.clean_line(" ".join(valid_words)))
lemma_line = self.create_lemma_line(valid_sentence)
try:
index = self.determine_most_similar_context(lemma_line)
if index != -1:
# respond with response to most similar context
answer = self.data.loc[index, "response"]
return answer
            # Otherwise respond with a message about one of the nouns in the given context
nlp = en_core_web_sm.load()
nouns = nlp(context)
# Get a random noun from the generated list of nouns, and select the first element
# which is the noun (second is what kind of noun)
noun = nouns[randint(0, len(nouns)-1)]
return "Sorry :,( I don't know what " + str(noun) + " is!"
except KeyError:
# an unknown word was passed
return "I am miss pwesident uwu"
def autocorrect(self, input):
# Creates the NLP named entity recognition
nlp = en_core_web_sm.load()
# Finds all of the nouns in the input string
nouns = nlp(input)
finalText = ""
# For all of the values in the input
for i in input.split(" "):
# If the values are not nouns (autocorrect breaks on nouns)
if i not in str(nouns):
# Run autocorrect on the nouns and add it to the final string
finalText += str(TextBlob(i).correct()) + " "
# Else just add the noun
else:
finalText += i + " "
return finalText
def determine_most_similar_context(self, lemma_line, similarity_threshold=0.05):
'''
@param lemma_line: a dictionary of words from the input
----
        return index of datapoint with most similar context to the one given
'''
# create dataframe of one row initialized to zeros
# this will represent the lemma
valid_sentence = DataFrame(0, columns=self.bag.columns, index=[0])
# set column of 1's for words in lemma line
for i in lemma_line["Lemmas"].split(' '):
if i in valid_sentence.columns:
                # if the column exists, laur.ai recognizes the word
                # if laur.ai recognizes the word, weight it (set its column to 1)
                # otherwise, do not
valid_sentence.loc[:, i] = 1
else:
try:
for syn in wordnet.synsets(i):
if syn in valid_sentence.columns:
                            # if a synonym of the word matches a known column, laur.ai partially recognizes the word
                            # give the word a small weight (0.1) rather than a full one
                            # otherwise, do not
valid_sentence.loc[:, i] = 0.1
break
except AttributeError:
# Module has no attribute synsets
# (you have entered something that doesn't exist)
break
# find cosine similarity
cosine = 1 - pairwise_distances(self.bag, valid_sentence, metric="cosine")
# prepare data to be used in series with data's index
cosine = Series(cosine.reshape(1,-1)[0], index=self.data.index)
# determine index of element with highest similarity
# the answer is the response at this index
# if it does not find any datapoints similar then it recognizes nothing
# in the input and the index returned is -1
        # We can solve the 0 problem by simply saying that if cosine.max() is
        # less than the similarity threshold we are going to respond with a predefined message
if cosine.max() < similarity_threshold:
return -1
# return cosine.idxmax()
# if multiple indicies share the maximum value, pick a random
# create list of indicies of all maximum values
max_index = cosine[cosine.values == cosine.max()].index
# return a random index from the list
i = randint(0,len(max_index)-1)
return max_index[i]
print("Please wait as Laur.AI loads")
data_master = | read_csv("data/master_data.csv") | pandas.read_csv |
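# The retrieval in determine_most_similar_context boils down to: vectorize the
# cleaned input with the same bag-of-words vocabulary, compare it to every
# stored context with cosine similarity, and answer with the response of the
# best match above a threshold (random tie-break). Below is a minimal,
# self-contained sketch of that idea, independent of the Kivy app; all names
# here are illustrative only, not part of the original code.
from random import randint

import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import pairwise_distances

def best_response(user_text, contexts, responses, threshold=0.05):
    """Return the response paired with the most similar known context, or None."""
    vectorizer = CountVectorizer()
    bag = vectorizer.fit_transform(contexts)            # one row per known context
    query = vectorizer.transform([user_text])           # same vocabulary as the bag
    similarity = pd.Series(1 - pairwise_distances(bag, query, metric="cosine").ravel())
    if similarity.max() < threshold:
        return None                                     # nothing recognizable in the input
    best = similarity[similarity == similarity.max()].index
    return responses[best[randint(0, len(best) - 1)]]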
# EcoFOCI
"""Contains a collection of seabird equipment parsing.
These include:
Moored SBE (cnv files):
* 16,19,26,37,39,56
"""
import datetime
import sys
import pandas as pd
def sbetime_conversion(time_type='timeJ',data=None):
"""Seabird offers multiple time output options:
timeJ:
timeS:
timeJV2:
"""
pass
def seabird_header(filename=None):
r""" Seabird Instruments have a header usually defined by *END with a significant amount of
information imbedded. Send a flag to parse seabird headers. Better yet may be to combine seabird gear
into classes and subclasses.
"""
    assert filename.split('.')[-1] == 'cnv', 'Must provide a cnv file - use sbe software to convert'
header = []
var_names = {}
with open(filename) as fobj:
for k, line in enumerate(fobj.readlines()):
header = header + [line]
if "# name" in line:
var_names[int(line.split("=")[0].split()[-1])] = line.split("=")[1].split()[0].split(':')[0]
if "# start_time" in line:
start_time = line.split("[")[0].split("=")[-1].strip()
if "*END*" in line:
headercount=k+1
break
return (header, headercount, var_names, start_time)
class sbe16(object):
r""" Seabird 16
Basic Method to open files. Specific actions can be passes as kwargs for instruments
There are quite a few instrument varations possible here - use the header meta to indentify variables
"""
@staticmethod
def parse(filename=None, return_header=True, datetime_index=True):
r"""
Basic Method to open and read sbe16 .cnv files
"""
assert filename.split('.')[-1] == 'cnv' , 'Must provide a cnv file - use sbe software to convert'
header = []
var_names = {}
with open(filename) as fobj:
for k, line in enumerate(fobj.readlines()):
header = header + [line]
if "# name" in line:
var_names[int(line.split("=")[0].split()[-1])] = line.split("=")[1].split()[0].split(':')[0]
if "# start_time" in line:
start_time = line.split("[")[0].split("=")[-1].strip()
if "*END*" in line:
headercount=k+1
break
rawdata_df = pd.read_csv(filename,
delimiter="\s+",
parse_dates=True,
header=None,
names=var_names.values(),
skiprows=headercount)
        #TODO: force a time word when the user knows there are multiple columns via an argument
if 'timeJ' in var_names.values(): #time in elapsed days, needs start date
rawdata_df['date_time'] = [datetime.datetime.strptime(start_time, "%b %d %Y %H:%M:%S") + pd.Timedelta(days=x) for x in rawdata_df['timeJ']]
elif 'timeJV2' in var_names.values(): #time in julian date, needs start year
rawdata_df['date_time'] = [datetime.datetime(datetime.datetime.strptime(start_time, "%b %d %Y %H:%M:%S").year,1,1) + pd.Timedelta(days=x-1) for x in rawdata_df['timeJV2']]
elif 'timeS' in var_names.values(): #time in elapse seconds, needs start date
rawdata_df['date_time'] = [datetime.datetime.strptime(start_time, "%b %d %Y %H:%M:%S") + | pd.Timedelta(seconds=x) | pandas.Timedelta |
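# sbetime_conversion above is only a stub. The sketch below is one possible
# implementation inferred from the conversions used in sbe16.parse; it is an
# assumption, not the original author's code.
import datetime

import pandas as pd

def sbetime_conversion_sketch(time_type, values, start_time):
    """Convert a seabird time column to datetimes.

    time_type: 'timeJ' (elapsed days from start), 'timeJV2' (Julian day of year)
    or 'timeS' (elapsed seconds from start); start_time e.g. 'Jan 01 2020 00:00:00'.
    """
    start = datetime.datetime.strptime(start_time, "%b %d %Y %H:%M:%S")
    if time_type == "timeJ":
        return [start + pd.Timedelta(days=x) for x in values]
    if time_type == "timeJV2":
        return [datetime.datetime(start.year, 1, 1) + pd.Timedelta(days=x - 1) for x in values]
    if time_type == "timeS":
        return [start + pd.Timedelta(seconds=x) for x in values]
    raise ValueError(f"unknown time_type: {time_type}")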
import unittest
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType, StructField, StructType, IntegerType, FloatType
from haychecker.dhc.metrics import entropy
replace_empty_with_null = udf(lambda x: None if x == "" else x, StringType())
replace_0_with_null = udf(lambda x: None if x == 0 else x, IntegerType())
replace_0dot_with_null = udf(lambda x: None if x == 0. else x, FloatType())
replace_every_string_with_null = udf(lambda x: None, StringType())
replace_every_int_with_null = udf(lambda x: None, IntegerType())
replace_every_float_with_null = udf(lambda x: None, FloatType())
class TestEntropy(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestEntropy, self).__init__(*args, **kwargs)
self.spark = SparkSession.builder.master("local[2]").appName("entropy_test").getOrCreate()
self.spark.sparkContext.setLogLevel("ERROR")
def test_empty(self):
data = | pd.DataFrame() | pandas.DataFrame |
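# The metric under test measures the entropy of a column's value distribution.
# A minimal pandas/numpy sketch of Shannon entropy is shown below for reference;
# it is not the haychecker implementation, and the natural-log base is an
# assumption.
import numpy as np
import pandas as pd

def column_entropy(series):
    """Shannon entropy of the value distribution of a pandas Series."""
    probs = series.value_counts(normalize=True, dropna=True)
    return float(-(probs * np.log(probs)).sum())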
import pandas as pd
import numpy as np
from textplainer.Textplain import Textplain
from textplainer.explain import explain
from textplainer.explain import explain_prediction
from textplainer.explain import explain_predictions
from textplainer.ModelInterface import ModelInterface
from textplainer.dictionary import get_synonyms_and_antonyms
from textplainer.dictionary import get_fallows_synonyms_and_antonyms
from .TestModels import SingleWordModel
from .TestModels import MultiWordModel
###########################################################################
def test_Textplain_constructor():
texty = Textplain("My sample sentence. There are two parts.", 1, 0)
assert texty.baseline == 1.0, "Member variable populated"
assert texty.impact == 1.0, "Impact calculated "
assert len(texty.sentences) == 2, "Right number of sentences extracted"
def test_Textplain_reconstruction():
texty = Textplain("My sample sentence. There are two parts.", 1, 0)
recon = texty.generate_modified_textblock("Final part", 1)
assert recon == "My sample sentence. Final part.", "Reconstructed block with replacement"
recon = texty.generate_modified_textblock("First bit", 0)
assert recon == "First bit. There are two parts.", "Reconstructed block with replacement"
########################################################################################
def test_result_length():
null_model = ModelInterface("NULL")
df = pd.DataFrame({"id":[1,2,3],"text":["the cat","the hat","the mat"]})
result = explain_predictions(null_model, df, "text", None)
assert len(result) == len(df), "Explain function returns results for all records"
########################################################################################
def test_single_word_model():
jellybean_model = SingleWordModel("JellyBeanModel", "TEXT", "jellybean")
df = pd.DataFrame({"ID":[1,2],"TEXT":["bob eats jellybeans","jane likes to swim"]})
result = explain_predictions(jellybean_model, df, "TEXT", None)
assert len(result) == len(df), "Explain function returns results for all records"
assert result[0][0] == 1, "First record contains discriminative word that perfectly explains output."
assert result[1][0] == 0, "Second record cannot be determined"
########################################################################################
def test_single_word_pickled_model():
model = "tests/artefacts/model.pickle"
dataset = "tests/artefacts/data.csv"
src = "tests/"
result = explain(model, src, dataset, "TEXT", None)
assert len(result) == 2, "Explain function returns results for all records"
assert result[0][0] == 1, "First record contains discriminative word that perfectly explains output."
assert result[1][0] == 0, "Second record cannot be determined"
########################################################################################
def test_multi_word_model():
mood_model = MultiWordModel("MoodsModel", "TEXT", ["happy", "sad"])
df = pd.DataFrame({
"ID":[1,2],
"TEXT":["bob is very happy today","jane is happy most mornings, but sometimes sad after school"]
})
result = explain_predictions(mood_model, df, "TEXT", None)
assert len(result) == len(df), "Explain function returns results for all records"
record_one = result[0]
record_one_text = result[0][1]
assert record_one_text.__contains__("happy{{0.5}}"), "happy contribution"
record_two = result[1]
record_two_text = result[1][1]
assert record_two_text.__contains__("{{0.5}}"), "Words with partial contribution."
assert record_two_text.__contains__("sad{{0.5}}"), "sad has half contribution"
########################################################################################
def test_multiple_sentences_model():
jellybean_model = SingleWordModel("JellyBeanModel", "TEXT", "jellybean")
df = pd.DataFrame({"ID":[1,2],"TEXT":["I eat a jellybean. Bob eats a fig.","Jane likes to swim"]})
result = explain_predictions(jellybean_model, df, "TEXT", None)
assert len(result) == len(df), "Explain function returns results for all records"
assert result[0][0] == 1, "First record contains word that perfectly explains output."
assert result[1][0] == 0, "Second record cannot be determined"
record_one = result[0]
print("record one:", record_one)
record_one_text = result[0][1]
print("record one text:", record_one_text)
assert record_one_text.__contains__("{{1.0}}"), "One sentence with full contribution"
assert record_one_text.__contains__("jellybean{{1.0}}"), "jellybean has full contribution"
########################################################################################
def test_dictionary():
syns, ants = get_synonyms_and_antonyms("test")
assert str(type(syns)) == "<class 'list'>", "Synonyms should be returned as a list"
assert str(type(ants)) == "<class 'list'>", "Antonyms should be returned as a list"
########################################################################################
def test_fallows_dictionary():
word = "test"
syns, ants = get_fallows_synonyms_and_antonyms(word)
assert str(type(syns)) == "<class 'list'>", "Synonyms should be returned as a list"
assert str(type(ants)) == "<class 'list'>", "Antonyms should be returned as a list"
assert len(syns) == 10, "Test string should have 10 synonyms"
assert len(ants) == 3, "Test string should have 3 antonyms"
########################################################################################
def test_result_attributes():
null_model = ModelInterface("NULL")
df = pd.DataFrame({"id":[1,2],"text":["the cat","the hat"]})
result = explain_predictions(null_model, df, "text", None)
assert str(type(result)) == "<class 'list'>", "Explain function returns a list"
assert str(type(result[0])) == "<class 'tuple'>", "Returned list contains tuples"
#assert str(type(result[0][0])) == "<class 'float'>", "First element is a number"
assert isinstance(result[0][0], float) == True, "First element is a number"
assert str(type(result[0][1])) == "<class 'str'>", "Second element is a string"
########################################################################################
def test_exceptions():
null_model = ModelInterface("NULL")
    df = pd.DataFrame({"id":[1,2],"text":["the cat","the hat"]})
import os
import pandas as pd
import flopy as fp
import datetime as dt
import numpy as np
import scipy.stats as ss
import scipy.optimize as so
import geopandas as gpd
from shapely.geometry import LineString, MultiLineString, Point
# import warnings
# warnings.filterwarnings("ignore", message="converting a masked element to nan")
class RTD_util(object):
    '''Class to perform various functions, from setting up an age-based backtracking MODPATH simulation
    to analyzing the results'''
def __init__(self, sim, weight_label, group):
# import various variables from the MODFLOW model
self.ml = sim.get_model()
self.sim_name = 'mfsim.nam'
self.model_ws = self.ml.model_ws
self.oc = self.ml.get_package('OC')
self.dis = self.ml.get_package('DIS')
self.npf = self.ml.get_package('NPF')
self.tdis = sim.get_package('TDIS')
self.namefile = self.ml.namefile
self.prng = np.random.RandomState(9591029)
self.delr = self.dis.delr.array
self.delc = self.dis.delc.array
self.nlay = self.dis.nlay.array
self.nrow = self.dis.nrow.array
self.ncol = self.dis.ncol.array
self.l, self.r, self.c = np.indices((self.nlay, self.nrow, self.ncol))
self.bot = self.dis.botm.array
self.top = self.dis.top.array
self.seqnums = np.arange(self.nlay * self.nrow * self.ncol)
# self.hnoflo = self.bas.hnoflo
# self.hdry = self.upw.hdry
self.ibound = np.asarray(self.dis.idomain.array)
self.hk = np.asarray(self.npf.k.array)
self.vka = np.asarray(self.npf.k33.array)
self.weight_label = weight_label
self.group = group
self.mpname = '{}_{}_{}'.format(self.ml.name, self.weight_label, self.group)
self._len_mult()
# Create dictionary of multipliers for converting model time units to years
time_dict = dict()
time_dict['unknown'] = 1.0 # undefined assumes days
time_dict['seconds'] = 24 * 60 * 60 * 365.25
time_dict['minutes'] = 24 * 60 * 365.25
time_dict['hours'] = 24 * 365.25
time_dict['days'] = 365.25
time_dict['years'] = 1.0
self.time_dict = time_dict
def get_node(self, lrc_list):
"""
Get node number from a list of MODFLOW layer, row, column tuples.
Returns
-------
v : list of MODFLOW nodes for each layer (k), row (i),
and column (j) tuple in the input list
"""
if not isinstance(lrc_list, list):
lrc_list = [lrc_list]
nrc = self.nrow * self.ncol
v = []
for [k, i, j] in lrc_list:
node = int(((k) * nrc) + ((i) * self.ncol) + j)
v.append(node)
return v
def _get_output_dfs(self):
# Make dataframes of budget information
src = os.path.join(self.model_ws, '{}.cbb'.format(self.ml.name))
self.bud_obj = fp.utils.CellBudgetFile(src, precision='double')
all_bud_df = pd.DataFrame(self.bud_obj.recordarray)
# convert to zero base
all_bud_df['kper'] -= 1
all_bud_df['kstp'] -= 1
self.all_bud_df = all_bud_df
headfile = '{}.hds'.format(self.ml.name)
src = os.path.join(self.model_ws, headfile)
self.hds = fp.utils.binaryfile.HeadFile(src, precision='double')
def _get_kstpkper(self, mf_start_date_str = '01/01/1900', mp_release_date_str = '01/01/2018' ):
# Use calendar release date and MODFLOW start date to pick out head and budget
# items from transient model output
self._get_output_dfs()
# convert string representation of dates into Python datetime objects
self.mf_start_date = dt.datetime.strptime(mf_start_date_str , '%m/%d/%Y')
self.mp_release_date = dt.datetime.strptime(mp_release_date_str , '%m/%d/%Y')
# check to make sure they are valid
assert self.mf_start_date < self.mp_release_date, 'The particle release date has\
to be after the start of the MODFLOW simulation'
# group by period and step
kdf = self.all_bud_df.groupby(['kper', 'kstp']).median()
kdf = kdf[['pertim', 'totim']]
# make a datetime series for timesteps starting with 0
# totim is elapsed time in simulation time
units = self.tdis.time_units.array.lower()
if units == 'days':
units = 'D'
        end_date = self.mf_start_date + pd.to_timedelta(np.append(0, kdf.totim), unit=units)
        # reformat the dates to get rid of seconds
        kdf.loc[:, 'start_date'] = end_date[0:-1].map(lambda t: t.strftime('%Y-%m-%d %H:%M'))
        kdf.loc[:, 'end_date'] = end_date[1:].map(lambda t: t.strftime('%Y-%m-%d %H:%M'))
# reference time and date are set to the end of the last stress period
self.ref_time = kdf.totim.max()
self.ref_date = end_date.max()
# release time is calculated in tracking time (for particle release) and
# in simulation time (for identifying head and budget components)
self.release_time_trk = np.abs((self.ref_date - self.mp_release_date).days)
self.release_time_sim = (self.mp_release_date - self.mf_start_date).days
# find the latest group index that includes the release date
idx = (kdf.totim >= self.release_time_sim).idxmax()
kdf.loc[idx, 'particle_release'] = True
# switch period and step
self.kstpkper = (idx[1], idx[0])
assert self.ref_date > self.mp_release_date, 'The reference date has \
to be after the particle release'
def get_heads(self):
# Get the highest non-dry head in the 2D representation of the MODFLOW model
# in each vertical stack of cells
self._get_kstpkper()
heads = self.hds.get_data(kstpkper=self.kstpkper)
hd = heads.copy()
hd[self.dis.idomain.array != 1] = np.nan
# hd[np.isclose(self.upw.hdry, hd, atol=10)] = np.nan
self.hd = hd
def get_watertable(self):
# Get the highest non-dry head in the 2D representation of the MODFLOW model
# in each vertical stack of cells
self.get_heads()
hin = np.argmax(np.isfinite(self.hd), axis=0)
self.water_table = np.squeeze(self.hd[hin, self.r[0,:,:], self.c[0,:,:]])
def make_particle_array(self, parts_per_cell):
# Given the number of desired particles per cell, return an array in the
# format of MODPATH starting location information
if not hasattr(self, 'release_time_trk'):
self._get_kstpkper()
self.parts_per_cell = parts_per_cell
lg = self.l.ravel()
rg = self.r.ravel()
cg = self.c.ravel()
label = parts_per_cell
lrep = np.repeat( lg, parts_per_cell.ravel() )
rrep = np.repeat( rg, parts_per_cell.ravel() )
crep = np.repeat( cg, parts_per_cell.ravel() )
label = np.repeat( label, parts_per_cell.ravel() )
self.num_parts = lrep.shape[0]
# generate random relative coordinates within a cell in 3D
cell_coords = self.prng.rand( self.num_parts, 3 )
grp = 1
particles = np.zeros( ( self.num_parts, 11 ) )
particles[:, 0] = np.arange( 1, self.num_parts + 1 )
particles[:, 1] = grp
particles[:, 2] = 1
particles[:, 3] = lrep + 1
particles[:, 4] = rrep + 1
particles[:, 5] = crep + 1
particles[:, 6:9] = cell_coords
particles[:, 9] = self.release_time_trk
particles[:, 10] = label
return particles
def make_arbitrary_particle_array(self, seqnum, label, parts_per_cell=1000, top_face=False):
# Given the number of desired particles per cell, return an array in the
# format of MODPATH starting location information
if not hasattr(self, 'release_time_trk'):
self._get_kstpkper()
self.parts_per_cell = parts_per_cell
lg = self.l.ravel()[seqnum]
rg = self.r.ravel()[seqnum]
cg = self.c.ravel()[seqnum]
lrep = np.repeat( lg, parts_per_cell)
rrep = np.repeat( rg, parts_per_cell)
crep = np.repeat( cg, parts_per_cell)
label = np.repeat( label, parts_per_cell)
self.num_parts = lrep.shape[0]
# generate random relative coordinates within a cell in 3D
cell_coords = self.prng.rand( self.num_parts, 3 )
if top_face:
cell_coords[:, 2] = 6
grp = 1
particles = np.zeros( ( self.num_parts, 11 ) )
particles[:, 0] = np.arange( 1, self.num_parts + 1 )
particles[:, 1] = grp
particles[:, 2] = 1
particles[:, 3] = lrep + 1
particles[:, 4] = rrep + 1
particles[:, 5] = crep + 1
particles[:, 6:9] = cell_coords
particles[:, 9] = self.release_time_trk
particles[:, 10] = label
return particles
def write_starting_locations_file(self, particles):
# Given a particle starting array, write a MODPATH starting location file with
# header information
line = '{:5d}\n{:5d}\n'.format(1, 1)
line = line + 'group_{}\n'.format(1)
npart = particles.shape[0]
line = line + '{:6d}'.format(npart)
self.ep_file_name = os.path.join(self.model_ws, '{}_{}_{}'.format(self.ml.name, self.weight_label, self.group))
form = '%6d %6d %3d %3d %3d %3d %12.9f %12.9f %12.9f %12.9e %15d'
np.savetxt(self.ep_file_name+'.loc', particles, delimiter=' ', fmt=form, header=line, comments='')
def run_MODPATH(self, por, mp_exe_name):
# Run backtracking MODPATH simulation using a starting locations file
# prepare Modpath files
SimulationType = 1 # 1 endpoint; 2 pathline; 3 timeseries
TrackingDirection = 2 # 1 forward; 2 backward
WeakSinkOption = 1 # 1 pass; 2 stop
WeakSourceOption = 1 # 1 pass; 2 stop
        ReferenceTimeOption = 1  # 1 time value; 2 stress period, time step, relative offset
StopOption = 2 # 1 stop with simulation 2; extend if steady state 3; specify time
ParticleGenerationOption = 2 # 1 automatic; 2 external file
TimePointOption = 1 # 1 none; 2 number at fixed intervals; 3 array
BudgetOutputOption = 3 # 1 none; 2 summary; 3 list of cells; 4 trace mode
ZoneArrayOption = 1 # 1 none; 2 read zone array(s)
RetardationOption = 1 # 1 none; 2 read array(s)
AdvectiveObservationsOption = 1 # 1 none; 2 saved for all time pts 3; saved for final time pt
        options = [SimulationType, TrackingDirection, WeakSinkOption, WeakSourceOption, ReferenceTimeOption,
StopOption, ParticleGenerationOption, TimePointOption, BudgetOutputOption, ZoneArrayOption,
RetardationOption, AdvectiveObservationsOption]
mpnf = '{}_{}_{}.mpnam'.format(self.ml.name, self.weight_label, self.group)
mplf = '{}_{}_{}.mplst'.format(self.ml.name, self.weight_label, self.group)
mp = fp.modpath.Modpath(modelname=self.mpname, modflowmodel=self.ml, dis_file=self.dis.file_name[0], exe_name=mp_exe_name,
model_ws=self.model_ws, simfile_ext='mpsim', dis_unit=self.dis.unit_number[0])
mpsim = fp.modpath.ModpathSim(mp, mp_name_file=mpnf,
mp_list_file=mplf,
option_flags=options,
ref_time=self.ref_time,
cell_bd_ct=0,
# bud_loc=bud_chk_dict[group].loc[:, ('Grid', 'Layer', 'Row', 'Column')].values.tolist(),
extension='mpsim')
mpbas = fp.modpath.ModpathBas(mp, hnoflo=self.bas.hnoflo, hdry=self.upw.hdry,
def_face_ct=1, bud_label=['RECHARGE'], def_iface=[6],
laytyp=self.upw.laytyp.get_value(), ibound=self.bas.ibound.array,
prsity=por, prsityCB=0.20)
mp.write_input()
success, msg = mp.run_model(silent=True, report=False)
# delete starting locations to save space--this information is now in the endpoint file
if success:
dst_pth = os.path.join(self.model_ws, '{}_{}_{}.loc'.format(self.ml.name, self.weight_label, self.group))
os.remove(dst_pth)
def modify_endpoint_file(self, ep_data_, write=False):
if not hasattr(self, 'water_table'):
self.get_watertable()
ep_data_ = ep_data_.copy()
# Clean up and enhance an MODPATH endpoint file
# set the Z coordinate for particles that end in dry cells to the
# head of the nearest non-dry cell below the dry cell.
# ind = np.isclose(ep_data_.loc[:, 'Final Global Z'], self.upw.hdry, atol=100)
# ep_data_.loc[:, 'Final Global Z'] = np.where(ind, self.water_table[ep_data_.loc[:, 'Final Row'] - 1,
# ep_data_.loc[:, 'Final Column']-1], ep_data_.loc[:, 'Final Global Z'])
# eliminate particles that start in dry cells
# ind = np.isclose(ep_data_.loc[:, 'Initial Global Z'], self.npf.hdry, rtol=0.99999)
# self.ep_data = ep_data_.loc[~ind, :]
# calculate approximate linear path distances
x_dist = ep_data_.loc[:, 'Final global x'] - ep_data_.loc[:, 'Initial global x']
y_dist = ep_data_.loc[:, 'Final global y'] - ep_data_.loc[:, 'Initial global y']
z_dist = ep_data_.loc[:, 'Final global z'] - ep_data_.loc[:, 'Initial global z']
ep_data_.loc[:, 'xy_path_len'] = np.sqrt(x_dist**2 + y_dist**2)
ep_data_.loc[:, 'xyz_path_len'] = np.sqrt(x_dist**2 + y_dist**2 + z_dist**2)
mendpoint_file = '{}_mod.{}'.format(self.mpname, 'mpend')
mendpoint_file = os.path.join(self.model_ws, mendpoint_file)
if write:
ep_data_.to_csv(mendpoint_file)
endpoint_file = '{}.{}'.format(self.mpname, 'mpend')
endpoint_file = os.path.join(self.model_ws, endpoint_file)
if os.path.exists(endpoint_file):
os.remove(endpoint_file)
self.ep_data = ep_data_
def get_budget(self, text):
# Get the MODFLOW budget file for the time period specified by the MODPATH release date
# and the MODFLOW start date.
self._get_kstpkper()
# budget = self.bud_obj.get_data(kstpkper=self.kstpkper, text=text, full3D=True)[0]
budget = self.bud_obj.get_data(kstpkper=self.kstpkper, text=text)
self.budget = budget
def _len_mult(self):
# the database values are in feet; if the model is in meters,
# provide a multiplier to convert database values to match the model
lenuni_dict = {0: 'undefined units', 1: 'feet', 2: 'meters', 3: 'centimeters'}
if self.dis.length_units.array == 'meters':
self.len_mult = 0.3048006096012192
elif self.dis.length_units.array == 'feet':
self.len_mult = 1.0
else:
print('unknown length units')
self.len_mult = 1.0
def read_endpoints(self, endpoint_file):
# read MODPATH 6 endpoint file
# count the number of header lines
i = 0
with open(endpoint_file) as f:
while True:
line = f.readline()
i += 1
if 'END HEADER' in line:
break
elif not line:
break
# columns names from MP6 docs
        cols = ['Sequence number', 'Particle Group', 'Particle ID', 'Status',
                'Initial tracking time', 'Final tracking time', 'Initial cell number',
                'Initial layer', 'Initial local x', 'Initial local y', 'Initial local z',
                'Initial global x', 'Initial global y', 'Initial global z',
                'Initial zone', 'Initial face', 'Final cell number', 'Final layer',
                'Final local x', 'Final local y', 'Final local z', 'Final global x',
                'Final global y', 'Final global z', 'Final zone', 'Final face']
# read the endpoint data
        ep_data = pd.read_csv(endpoint_file, names=cols, header=None, skiprows=i, delim_whitespace=True)
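# ----------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original class).
# The simulation path, porosity, and particle counts below are assumptions,
# and running it requires MODFLOW 6 output plus a MODPATH executable, so the
# calls are left commented out.
#
#     sim = fp.mf6.MFSimulation.load(sim_ws='model_dir')
#     rtd = RTD_util(sim, weight_label='flux', group='wells')
#     rtd.get_watertable()                      # highest non-dry head in each cell stack
#     parts = rtd.make_particle_array(np.full((rtd.nlay, rtd.nrow, rtd.ncol), 2, dtype=int))
#     rtd.write_starting_locations_file(parts)
#     rtd.run_MODPATH(por=0.2, mp_exe_name='mp6')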
import csv
import os
import pandas as pd
import re
import sys
import time
from .atom import Atom
from .periodic_table import PeriodicTable as PT
__all__ = [
"cd",
"check_user_input",
"consecutive",
"df_from_namedtuples",
"eof",
"get_files",
"get_log_type",
"list_of_dicts_to_one_level_dict",
"module_exists",
"read_file",
"read_xyz",
"remove_nones_from_dict",
"responsive_table",
"search_dict_recursively",
"sort_data",
"sort_elements",
"timeit",
"write_csv_from_dict",
"write_csv_from_nested",
"write_geom_input_for_thermo",
"write_xyz",
]
def cd(path):
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
def df_from_namedtuples(definition, lst):
"""
Pass in the namedtuple created and a list of
objects made using the namedtuple, and this
function returns a dataframe.
Usage:
>>> nt = namedtuple('nt', 'one two')
>>> one = nt(10, 20)
>>> two = nt(30,40)
>>> df_from_namedtuples(nt, [one,two])
Returns:
one two
0 10 20
1 30 40
"""
data = {v: [] for v in definition._fields}
for val in lst:
for k, v in val._asdict().items():
data[k].append(v)
    return pd.DataFrame(data)
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Custom pandas accessors for signals data.
Methods can be accessed as follows:
* `SignalsSRAccessor` -> `pd.Series.vbt.signals.*`
* `SignalsDFAccessor` -> `pd.DataFrame.vbt.signals.*`
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> # vectorbt.signals.accessors.SignalsAccessor.pos_rank
>>> pd.Series([False, True, True, True, False]).vbt.signals.pos_rank()
0 0
1 1
2 2
3 3
4 0
dtype: int64
```
The accessors extend `vectorbt.generic.accessors`.
!!! note
The underlying Series/DataFrame should already be a signal series.
Input arrays should be `np.bool_`.
Grouping is only supported by the methods that accept the `group_by` argument.
Accessors do not utilize caching.
Run for the examples below:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> mask = pd.DataFrame({
... 'a': [True, False, False, False, False],
... 'b': [True, False, True, False, True],
... 'c': [True, True, True, False, False]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]))
>>> mask
a b c
2020-01-01 True True True
2020-01-02 False False True
2020-01-03 False True True
2020-01-04 False False False
2020-01-05 False True False
```
## Stats
!!! hint
See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `SignalsAccessor.metrics`.
```python-repl
>>> mask.vbt.signals.stats(column='a')
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 1
Rate [%] 20
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance: Min NaT
Distance: Max NaT
Distance: Mean NaT
Distance: Std NaT
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std NaT
Partition Distance: Min NaT
Partition Distance: Max NaT
Partition Distance: Mean NaT
Partition Distance: Std NaT
Name: a, dtype: object
```
We can pass another signal array to compare this array with:
```python-repl
>>> mask.vbt.signals.stats(column='a', settings=dict(other=mask['b']))
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 1
Rate [%] 20
Total Overlapping 1
Overlapping Rate [%] 33.3333
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance -> Other: Min 0 days 00:00:00
Distance -> Other: Max 0 days 00:00:00
Distance -> Other: Mean 0 days 00:00:00
Distance -> Other: Std NaT
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std NaT
Partition Distance: Min NaT
Partition Distance: Max NaT
Partition Distance: Mean NaT
Partition Distance: Std NaT
Name: a, dtype: object
```
We can also return duration as a floating number rather than a timedelta:
```python-repl
>>> mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False))
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5
Total 1
Rate [%] 20
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance: Min NaN
Distance: Max NaN
Distance: Mean NaN
Distance: Std NaN
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1
Partition Length: Max 1
Partition Length: Mean 1
Partition Length: Std NaN
Partition Distance: Min NaN
Partition Distance: Max NaN
Partition Distance: Mean NaN
Partition Distance: Std NaN
Name: a, dtype: object
```
`SignalsAccessor.stats` also supports (re-)grouping:
```python-repl
>>> mask.vbt.signals.stats(column=0, group_by=[0, 0, 1])
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 4
Rate [%] 40
First Index 2020-01-01 00:00:00
Last Index 2020-01-05 00:00:00
Norm Avg Index [-1, 1] -0.25
Distance: Min 2 days 00:00:00
Distance: Max 2 days 00:00:00
Distance: Mean 2 days 00:00:00
Distance: Std 0 days 00:00:00
Total Partitions 4
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std 0 days 00:00:00
Partition Distance: Min 2 days 00:00:00
Partition Distance: Max 2 days 00:00:00
Partition Distance: Mean 2 days 00:00:00
Partition Distance: Std 0 days 00:00:00
Name: 0, dtype: object
```
## Plots
!!! hint
See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots` and `SignalsAccessor.subplots`.
This class inherits subplots from `vectorbt.generic.accessors.GenericAccessor`.
"""
import warnings
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.base import reshape_fns
from vectorbt.base.array_wrapper import ArrayWrapper
from vectorbt.generic import nb as generic_nb
from vectorbt.generic import plotting
from vectorbt.generic.accessors import GenericAccessor, GenericSRAccessor, GenericDFAccessor
from vectorbt.generic.ranges import Ranges
from vectorbt.records.mapped_array import MappedArray
from vectorbt.root_accessors import register_dataframe_vbt_accessor, register_series_vbt_accessor
from vectorbt.signals import nb
from vectorbt.utils import checks
from vectorbt.utils.colors import adjust_lightness
from vectorbt.utils.config import merge_dicts, Config
from vectorbt.utils.decorators import class_or_instancemethod
from vectorbt.utils.template import RepEval
__pdoc__ = {}
class SignalsAccessor(GenericAccessor):
"""Accessor on top of signal series. For both, Series and DataFrames.
Accessible through `pd.Series.vbt.signals` and `pd.DataFrame.vbt.signals`."""
def __init__(self, obj: tp.SeriesFrame, **kwargs) -> None:
checks.assert_dtype(obj, np.bool_)
GenericAccessor.__init__(self, obj, **kwargs)
@property
def sr_accessor_cls(self) -> tp.Type["SignalsSRAccessor"]:
"""Accessor class for `pd.Series`."""
return SignalsSRAccessor
@property
def df_accessor_cls(self) -> tp.Type["SignalsDFAccessor"]:
"""Accessor class for `pd.DataFrame`."""
return SignalsDFAccessor
# ############# Overriding ############# #
def bshift(self, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.generic.accessors.GenericAccessor.bshift` with `fill_value=False`."""
return GenericAccessor.bshift(self, *args, fill_value=fill_value, **kwargs)
def fshift(self, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.generic.accessors.GenericAccessor.fshift` with `fill_value=False`."""
return GenericAccessor.fshift(self, *args, fill_value=fill_value, **kwargs)
@classmethod
def empty(cls, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.base.accessors.BaseAccessor.empty` with `fill_value=False`."""
return GenericAccessor.empty(*args, fill_value=fill_value, dtype=np.bool_, **kwargs)
@classmethod
def empty_like(cls, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.base.accessors.BaseAccessor.empty_like` with `fill_value=False`."""
return GenericAccessor.empty_like(*args, fill_value=fill_value, dtype=np.bool_, **kwargs)
# ############# Generation ############# #
@classmethod
def generate(cls,
shape: tp.RelaxedShape,
choice_func_nb: tp.ChoiceFunc, *args,
pick_first: bool = False,
**kwargs) -> tp.SeriesFrame:
"""See `vectorbt.signals.nb.generate_nb`.
`**kwargs` will be passed to pandas constructor.
## Example
Generate random signals manually:
```python-repl
>>> @njit
... def choice_func_nb(from_i, to_i, col):
... return col + from_i
>>> pd.DataFrame.vbt.signals.generate((5, 3),
... choice_func_nb, index=mask.index, columns=mask.columns)
a b c
2020-01-01 True False False
2020-01-02 False True False
2020-01-03 False False True
2020-01-04 False False False
2020-01-05 False False False
```
"""
checks.assert_numba_func(choice_func_nb)
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
result = nb.generate_nb(shape, pick_first, choice_func_nb, *args)
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result[:, 0], **kwargs)
return pd.DataFrame(result, **kwargs)
@classmethod
def generate_both(cls,
shape: tp.RelaxedShape,
entry_choice_func_nb: tp.Optional[tp.ChoiceFunc] = None,
entry_args: tp.ArgsLike = None,
exit_choice_func_nb: tp.Optional[tp.ChoiceFunc] = None,
exit_args: tp.ArgsLike = None,
entry_wait: int = 1,
exit_wait: int = 1,
entry_pick_first: bool = True,
exit_pick_first: bool = True,
**kwargs) -> tp.Tuple[tp.SeriesFrame, tp.SeriesFrame]:
"""See `vectorbt.signals.nb.generate_enex_nb`.
`**kwargs` will be passed to pandas constructor.
## Example
        Generate entry and exit signals one after another. Each column increments
the number of ticks to wait before placing the exit signal.
```python-repl
>>> @njit
... def entry_choice_func_nb(from_i, to_i, col, temp_idx_arr):
... temp_idx_arr[0] = from_i
... return temp_idx_arr[:1] # array with one signal
>>> @njit
... def exit_choice_func_nb(from_i, to_i, col, temp_idx_arr):
... wait = col
... temp_idx_arr[0] = from_i + wait
... if temp_idx_arr[0] < to_i:
... return temp_idx_arr[:1] # array with one signal
... return temp_idx_arr[:0] # empty array
>>> temp_idx_arr = np.empty((1,), dtype=np.int_) # reuse memory
>>> en, ex = pd.DataFrame.vbt.signals.generate_both(
... (5, 3),
... entry_choice_func_nb, (temp_idx_arr,),
... exit_choice_func_nb, (temp_idx_arr,),
... index=mask.index, columns=mask.columns)
>>> en
a b c
2020-01-01 True True True
2020-01-02 False False False
2020-01-03 True False False
2020-01-04 False True False
2020-01-05 True False True
>>> ex
a b c
2020-01-01 False False False
2020-01-02 True False False
2020-01-03 False True False
2020-01-04 True False True
2020-01-05 False False False
```
"""
checks.assert_not_none(entry_choice_func_nb)
checks.assert_not_none(exit_choice_func_nb)
checks.assert_numba_func(entry_choice_func_nb)
checks.assert_numba_func(exit_choice_func_nb)
if entry_args is None:
entry_args = ()
if exit_args is None:
exit_args = ()
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
result1, result2 = nb.generate_enex_nb(
shape,
entry_wait,
exit_wait,
entry_pick_first,
exit_pick_first,
entry_choice_func_nb, entry_args,
exit_choice_func_nb, exit_args
)
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result1[:, 0], **kwargs), pd.Series(result2[:, 0], **kwargs)
return pd.DataFrame(result1, **kwargs), pd.DataFrame(result2, **kwargs)
def generate_exits(self,
exit_choice_func_nb: tp.ChoiceFunc, *args,
wait: int = 1,
until_next: bool = True,
skip_until_exit: bool = False,
pick_first: bool = False,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.signals.nb.generate_ex_nb`.
## Example
Fill all space after signals in `mask`:
```python-repl
>>> @njit
... def exit_choice_func_nb(from_i, to_i, col, temp_range):
... return temp_range[from_i:to_i]
>>> temp_range = np.arange(mask.shape[0]) # reuse memory
>>> mask.vbt.signals.generate_exits(exit_choice_func_nb, temp_range)
a b c
2020-01-01 False False False
2020-01-02 True True False
2020-01-03 True False False
2020-01-04 True True True
2020-01-05 True False True
```
"""
checks.assert_numba_func(exit_choice_func_nb)
exits = nb.generate_ex_nb(
self.to_2d_array(),
wait,
until_next,
skip_until_exit,
pick_first,
exit_choice_func_nb,
*args
)
return self.wrapper.wrap(exits, group_by=False, **merge_dicts({}, wrap_kwargs))
# ############# Filtering ############# #
@class_or_instancemethod
def clean(cls_or_self,
*args,
entry_first: bool = True,
broadcast_kwargs: tp.KwargsLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeTuple[tp.SeriesFrame]:
"""Clean signals.
If one array passed, see `SignalsAccessor.first`.
If two arrays passed, entries and exits, see `vectorbt.signals.nb.clean_enex_nb`."""
if not isinstance(cls_or_self, type):
args = (cls_or_self.obj, *args)
if len(args) == 1:
obj = args[0]
if not isinstance(obj, (pd.Series, pd.DataFrame)):
wrapper = ArrayWrapper.from_shape(np.asarray(obj).shape)
obj = wrapper.wrap(obj)
return obj.vbt.signals.first(wrap_kwargs=wrap_kwargs)
elif len(args) == 2:
if broadcast_kwargs is None:
broadcast_kwargs = {}
entries, exits = reshape_fns.broadcast(*args, **broadcast_kwargs)
entries_out, exits_out = nb.clean_enex_nb(
reshape_fns.to_2d_array(entries),
reshape_fns.to_2d_array(exits),
entry_first
)
return (
ArrayWrapper.from_obj(entries).wrap(entries_out, group_by=False, **merge_dicts({}, wrap_kwargs)),
ArrayWrapper.from_obj(exits).wrap(exits_out, group_by=False, **merge_dicts({}, wrap_kwargs))
)
else:
raise ValueError("Either one or two arrays must be passed")
# ############# Random ############# #
@classmethod
def generate_random(cls,
shape: tp.RelaxedShape,
n: tp.Optional[tp.ArrayLike] = None,
prob: tp.Optional[tp.ArrayLike] = None,
pick_first: bool = False,
seed: tp.Optional[int] = None,
**kwargs) -> tp.SeriesFrame:
"""Generate signals randomly.
If `n` is set, see `vectorbt.signals.nb.generate_rand_nb`.
If `prob` is set, see `vectorbt.signals.nb.generate_rand_by_prob_nb`.
`n` should be either a scalar or an array that will broadcast to the number of columns.
`prob` should be either a single number or an array that will broadcast to match `shape`.
`**kwargs` will be passed to pandas constructor.
## Example
For each column, generate a variable number of signals:
```python-repl
>>> pd.DataFrame.vbt.signals.generate_random((5, 3), n=[0, 1, 2],
... seed=42, index=mask.index, columns=mask.columns)
a b c
2020-01-01 False False True
2020-01-02 False False True
2020-01-03 False False False
2020-01-04 False True False
2020-01-05 False False False
```
For each column and time step, pick a signal with 50% probability:
```python-repl
>>> pd.DataFrame.vbt.signals.generate_random((5, 3), prob=0.5,
... seed=42, index=mask.index, columns=mask.columns)
a b c
2020-01-01 True True True
2020-01-02 False True False
2020-01-03 False False False
2020-01-04 False False True
2020-01-05 True False True
```
"""
flex_2d = True
if not isinstance(shape, tuple):
flex_2d = False
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
flex_2d = False
shape = (shape[0], 1)
if n is not None and prob is not None:
raise ValueError("Either n or prob should be set, not both")
if n is not None:
n = np.broadcast_to(n, shape[1])
result = nb.generate_rand_nb(shape, n, seed=seed)
elif prob is not None:
prob = np.broadcast_to(prob, shape)
result = nb.generate_rand_by_prob_nb(shape, prob, pick_first, flex_2d, seed=seed)
else:
raise ValueError("At least n or prob should be set")
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result[:, 0], **kwargs)
return pd.DataFrame(result, **kwargs)
# ############# Exits ############# #
@classmethod
def generate_random_both(cls,
shape: tp.RelaxedShape,
n: tp.Optional[tp.ArrayLike] = None,
entry_prob: tp.Optional[tp.ArrayLike] = None,
exit_prob: tp.Optional[tp.ArrayLike] = None,
seed: tp.Optional[int] = None,
entry_wait: int = 1,
exit_wait: int = 1,
entry_pick_first: bool = True,
exit_pick_first: bool = True,
**kwargs) -> tp.Tuple[tp.SeriesFrame, tp.SeriesFrame]:
"""Generate chain of entry and exit signals randomly.
If `n` is set, see `vectorbt.signals.nb.generate_rand_enex_nb`.
If `entry_prob` and `exit_prob` are set, see `vectorbt.signals.nb.generate_rand_enex_by_prob_nb`.
For arguments, see `SignalsAccessor.generate_random`.
## Example
For each column, generate two entries and exits randomly:
```python-repl
>>> en, ex = pd.DataFrame.vbt.signals.generate_random_both(
... (5, 3), n=2, seed=42, index=mask.index, columns=mask.columns)
>>> en
a b c
2020-01-01 True True True
2020-01-02 False False False
2020-01-03 True True False
2020-01-04 False False True
2020-01-05 False False False
>>> ex
a b c
2020-01-01 False False False
2020-01-02 True True True
2020-01-03 False False False
2020-01-04 False True False
2020-01-05 True False True
```
For each column and time step, pick entry with 50% probability and exit right after:
```python-repl
>>> en, ex = pd.DataFrame.vbt.signals.generate_random_both(
... (5, 3), entry_prob=0.5, exit_prob=1.,
... seed=42, index=mask.index, columns=mask.columns)
>>> en
a b c
2020-01-01 True True True
2020-01-02 False False False
2020-01-03 False False False
2020-01-04 False False True
2020-01-05 True False False
>>> ex
a b c
2020-01-01 False False False
2020-01-02 True True False
2020-01-03 False False True
2020-01-04 False True False
2020-01-05 True False True
```
"""
flex_2d = True
if not isinstance(shape, tuple):
flex_2d = False
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
flex_2d = False
shape = (shape[0], 1)
if n is not None and (entry_prob is not None or exit_prob is not None):
raise ValueError("Either n or any of the entry_prob and exit_prob should be set, not both")
if n is not None:
n = np.broadcast_to(n, shape[1])
entries, exits = nb.generate_rand_enex_nb(shape, n, entry_wait, exit_wait, seed=seed)
elif entry_prob is not None and exit_prob is not None:
entry_prob = np.broadcast_to(entry_prob, shape)
exit_prob = np.broadcast_to(exit_prob, shape)
entries, exits = nb.generate_rand_enex_by_prob_nb(
shape,
entry_prob,
exit_prob,
entry_wait,
exit_wait,
entry_pick_first,
exit_pick_first,
flex_2d,
seed=seed
)
else:
raise ValueError("At least n, or entry_prob and exit_prob should be set")
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(entries[:, 0], **kwargs), pd.Series(exits[:, 0], **kwargs)
        return pd.DataFrame(entries, **kwargs), pd.DataFrame(exits, **kwargs)
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 11:37:09 2017
@author: <NAME>
"""
# =============================================================================
# Import the required libraries
# =============================================================================
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
import xgboost as xgb
import operator
import datetime as dt
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# =============================================================================
# Load the datasets
# =============================================================================
df_train = pd.read_csv("../input/train.csv", low_memory=False)
df_test = pd.read_csv("../input/test.csv", low_memory=False)
df_store = pd.read_csv("../input/store.csv", low_memory=False)
"""
Testing that functions from rpy work as expected
"""
import pandas as pd
import numpy as np
import unittest
import nose
import pandas.util.testing as tm
try:
import pandas.rpy.common as com
from rpy2.robjects import r
import rpy2.robjects as robj
except ImportError:
raise nose.SkipTest('R not installed')
class TestCommon(unittest.TestCase):
def test_convert_list(self):
obj = r('list(a=1, b=2, c=3)')
converted = com.convert_robj(obj)
expected = {'a': [1], 'b': [2], 'c': [3]}
tm.assert_dict_equal(converted, expected)
def test_convert_nested_list(self):
obj = r('list(a=list(foo=1, bar=2))')
converted = com.convert_robj(obj)
expected = {'a': {'foo': [1], 'bar': [2]}}
tm.assert_dict_equal(converted, expected)
def test_convert_frame(self):
# built-in dataset
df = r['faithful']
converted = com.convert_robj(df)
assert np.array_equal(converted.columns, ['eruptions', 'waiting'])
assert np.array_equal(converted.index, np.arange(1, 273))
def _test_matrix(self):
r('mat <- matrix(rnorm(9), ncol=3)')
r('colnames(mat) <- c("one", "two", "three")')
r('rownames(mat) <- c("a", "b", "c")')
return r['mat']
def test_convert_matrix(self):
mat = self._test_matrix()
converted = com.convert_robj(mat)
assert np.array_equal(converted.index, ['a', 'b', 'c'])
assert np.array_equal(converted.columns, ['one', 'two', 'three'])
def test_convert_r_dataframe(self):
is_na = robj.baseenv.get("is.na")
seriesd = tm.getSeriesData()
frame = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])
# Null data
frame["E"] = [np.nan for item in frame["A"]]
# Some mixed type data
frame["F"] = ["text" if item %
2 == 0 else np.nan for item in range(30)]
        r_dataframe = com.convert_to_r_dataframe(frame)
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
    # with missing values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
        MultiIndex.from_arrays(arrays=[])
import gseapy as gp
import pandas as pd
import plotly.express as px
from ..common.load_h5 import H5COUNTS
class GSEA_Analysis():
def __init__(self, data:H5COUNTS, path="data/interim/",
threshold=0.05,
gene_sets=['GO_Biological_Process_2018', 'GO_Cellular_Component_2018', 'GO_Molecular_Function_2018'],
tumor_ids=[1, 2, 3, 4, 5, 6, 7, 8]):
        self.gsea_table = pd.DataFrame()
#%%
"""
Reference page:
https://stackoverflow.com/questions/36028759/how-to-open-and-convert-sqlite-database-to-pandas-dataframe
Pull the tables out of the SQLite database into pandas DataFrames, then reorganize them.
"""
import os
import copy
import time
import sqlite3
import numpy as np
import pandas as pd
#%%
# Check the file and path
print(os.getcwd())
path=os.listdir('./db')
path[0]
path='./db'+'/'+path[0]
# Create your connection.
cnx = sqlite3.connect(path)
# cnx = sqlite3.connect(".\\df\\stockNo_2330.sqlite")
cursor=cnx.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type = "table"')
Tnamelist = cursor.fetchall()
Tdatall=[]
i=Tnamelist[0]
for i in Tnamelist:
print(str(i[0]))
print("SELECT * FROM "+str(i[0]))
df = pd.read_sql_query(("SELECT * FROM "+str(i[0])), cnx)
Tdatall.append(df)
time.sleep(0.15)
cnx.close()
time.sleep(0.5)
dfall = pd.concat(Tdatall)
dfall = dfall.reset_index(drop=True)
# Remove variables that are no longer needed
del Tdatall, Tnamelist, df, i, cursor, path
#%%
# Rename the columns to English names
ch = pd.DataFrame(dfall.columns, columns=['ch'])
en = pd.DataFrame(['date', 'shares', 'amount', 'open', 'high', 'low', 'close', 'change', 'turnover'],
columns=['en'])
ch_en = pd.concat([ch,en], axis=1)
del ch, en
dfall.columns = ch_en['en']
#%%
# Check the dtypes
# Two ways to peek at the dtypes - both are important
dfall.info()
dfall.dtypes
# First strip the commas inside the string values
dfall.iloc[:,1:] = dfall.iloc[:,1:].apply(
lambda x: pd.to_numeric(x.astype(str).str.replace(',',''),
errors='coerce'))
# Then fix the .dtypes issue: dtypes => object is not accepted by some numerical models
dff1 = dfall.iloc[:,0:1]
dff2 = dfall.iloc[:,1:]
dff2=dff2.astype('float32')
dff2.info()
dff2.dtypes
dfall = pd.concat([dff1, dff2], axis=1)
dfall.info()
del dff1, dff2
#%% First work out the day of the week
# shallow copy vs. deep copy
# import copy
# df_date=copy.deepcopy(DFall['日期'])
dfall.columns
type(dfall['date'][0])
df_date=pd.DataFrame(dfall['date']) # verified: this operation makes a deep copy
df_date.iloc[0,0]=' 00/01/04'
df_date=pd.DataFrame(dfall['date'])
# df_date[['yy', 'mm', 'dd']]=df_date.日期.str.split('/', expand=True)
df_date[['yy', 'mm', 'dd']]=df_date["date"].str.split('/', expand=True)
df_date['yy'][0]
df_date['yy']=df_date['yy'].str.strip()
"""
# apply is the most flexible of all the functions in pandas
# you can define your own Python function and apply it across an entire DataFrame
def gg(row):
return int(row)+1911
df_date['newyy'] = df_date.yy.apply(gg)
"""
type(df_date.yy)
df_date['ADyear'] = df_date.yy.apply(lambda x: str(int(x)+1911))
# Via the apply function
# we apply an anonymous lambda function to the entire df.Survived Series
# and use it to build a new 'Survived' column.
# "lambda x:" opens the anonymous function (its declaration)
# and what follows is the value to return
df_date.head(5)
df_date['ADate'] = df_date['ADyear'] + '-' + df_date['mm'] + '-' + df_date['dd']
df_date.head(5)
dfall = pd.merge(df_date,dfall, on='date')
dfall.dtypes
dfall.columns
dfall = dfall.iloc[:,5:]
del df_date
#%%
# Join the CSV data => TSMC US-listed (NYSE) data and the Taiwan weighted index (broad market)
print(os.listdir('./csv'))
df_NYSE_TSM = pd.read_csv('./csv/NYSE_TSM.csv')
df_NYSE_TSM['NY_up_down'] = df_NYSE_TSM['Close']-df_NYSE_TSM['Close'].shift()
df_NYSE_TSM.info()
df_TWII = pd.read_csv('./csv/^TWII.csv')
df_TWII['TWII_up_down'] = df_TWII['Close'] - df_TWII['Close'].shift()
df_TWII.info()
# Merge the TSMC US data with the Taiwan market index first
res1 = pd.merge(df_TWII.loc[:, ['Date','TWII_up_down']],
df_NYSE_TSM.loc[:, ['Date','NY_up_down']],
on=['Date'],
suffixes=['_TWII','_NYSE'],
how='left')
del df_TWII, df_NYSE_TSM
#%%
"""
The Taiwan market trades 09:00-13:30 (Taipei time).
The US session is longer, 09:30-16:00 (New York time).
The US also observes daylight saving time,
so the summer session is 21:30-04:00 (Taipei time)
and the winter session is 22:30-05:00 (Taipei time).
Taipei is 12 hours ahead of New York:
9:36 PM on Monday in Taipei is
9:36 AM on Monday in New York.
Example:
the Taipei session of 4/28 opens at 9:00 AM Taipei time,
which is 9:00 PM on 4/27 New York time,
so the Taipei 4/28 session should reference the New York 4/27 market data.
"""
#########################
type(dfall)
type(dfall.shape) # 這兩行 行為居然不太一樣
type(dfall['shares']) # 這兩行 行為居然不太一樣
dfall.info()
dfall.dtypes
#########################
# dfall['gg'] = dfall.shape.apply(lambda x: int(x/1000))
# the line above raises an error saying the tuple cannot be operated on
# the line below works
dfall['vol'] = dfall['shares'].apply(lambda x: int(x/1000))
dfall2 = dfall.iloc[:, [0, -4, -3, -1]]
# rename the columns
coll=list(dfall2.columns)
coll[0]="Date"
dfall2.columns=coll
# dfall2 = dfall2.rename(columns={"Date": "ADdate"})  # the way the official docs teach it - awkward to use
res2 = pd.merge(dfall2, res1, on='Date', how='left')
# shift moves the data by a given offset
# start shifting the US market data
res2.columns
res2['NY_up_down'] = res2['NY_up_down'].shift()
del coll
#%%
# KD (stochastic oscillator) indicator
"""
Raw Stochastic Value (RSV):
(today's close - lowest low of the last 9 days) / (highest high of the last 9 days - lowest low of the last 9 days)
Today's K: previous day's K * (2/3) + today's RSV * (1/3)
Today's D: previous day's D * (2/3) + today's K * (1/3)
"""
import talib
kd = pd.DataFrame()
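# ----------------------------------------------------------------------
# Hedged sketch (added for illustration) of the KD recursion described above,
# written with plain pandas instead of talib. The column names ('high', 'low',
# 'close') follow the renamed columns of dfall; the 9-day window, the 2/3-1/3
# smoothing, and the 50.0 seed values are common conventions and are
# assumptions here, not part of the original script.
def compute_kd(df, n=9):
    low_n = df['low'].rolling(n, min_periods=1).min()
    high_n = df['high'].rolling(n, min_periods=1).max()
    rsv = (df['close'] - low_n) / (high_n - low_n) * 100
    k_values, d_values = [], []
    k_prev, d_prev = 50.0, 50.0
    for r in rsv.fillna(50.0):
        k_prev = k_prev * 2 / 3 + r / 3
        d_prev = d_prev * 2 / 3 + k_prev / 3
        k_values.append(k_prev)
        d_values.append(d_prev)
    return pd.DataFrame({'K': k_values, 'D': d_values}, index=df.index)

# kd = compute_kd(dfall)  # uncomment to populate kd from the merged price table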
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
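# Illustrative usage sketch (not part of the original module): summing a plain
# bool dtype promotes the result to int64, while other dtype/how combinations
# pass through unchanged.
# >>> maybe_cast_result_dtype(np.dtype(bool), "sum")
# dtype('int64')
# >>> maybe_cast_result_dtype(np.dtype("float64"), "mean")
# dtype('float64')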
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
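# Illustrative usage sketch (not part of the original module): an integer dtype
# is upcast to float64 to hold a missing value, while an integer fill that a
# bool dtype cannot represent falls back to object.
# >>> maybe_promote(np.dtype("int64"), np.nan)
# (dtype('float64'), nan)
# >>> maybe_promote(np.dtype("bool"), 3)
# (dtype('O'), 3)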
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if our dtype is np.complex64, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to a pandas extension type is
inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to a pandas extension type is
inferred as object
"""
dtype: DtypeObj = np.dtype(object)
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
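# Illustrative usage sketch (not part of the original module):
# >>> infer_dtype_from_scalar(1)
# (dtype('int64'), 1)
# >>> infer_dtype_from_scalar(3.5)
# (dtype('float64'), 3.5)
# >>> infer_dtype_from_scalar("foo")
# (dtype('O'), 'foo')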
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
d: dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
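# Illustrative usage sketch (not part of the original module): datetime keys are
# normalized to Timestamps so they can be matched against a DatetimeIndex.
# >>> dict_compat({datetime(2020, 1, 1): 10})
# {Timestamp('2020-01-01 00:00:00'): 10}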
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
if pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion
if pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(
values: ArrayLike,
fill_value: Scalar = np.nan,
dtype: Dtype = None,
copy: bool = False,
) -> Tuple[ArrayLike, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : ndarray or ExtensionArray
The array that we want to maybe upcast.
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : bool, default False
If True always make a copy even if no upcast is required.
Returns
-------
values: ndarray or ExtensionArray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
if not is_scalar(fill_value) and not is_object_dtype(values.dtype):
# We allow arbitrary fill values for object dtype
raise ValueError("fill_value must be a scalar")
if is_extension_array_dtype(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
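# Illustrative usage sketch (not part of the original module): with only a few
# categories the codes fit comfortably in int8.
# >>> coerce_indexer_dtype(np.array([0, 1, 2, 1]), ["a", "b", "c"])
# array([0, 1, 2, 1], dtype=int8)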
def astype_nansafe(
arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.ensure_string_array(
arr.ravel(), skipna=skipna, convert_na_value=False
).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == "m":
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == TD64NS_DTYPE:
return arr.astype(TD64NS_DTYPE, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
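# Illustrative usage sketch (not part of the original module): float -> int is
# only allowed when every value is finite.
# >>> astype_nansafe(np.array([1.0, 2.0]), np.dtype("int64"))
# array([1, 2])
# >>> astype_nansafe(np.array([1.0, np.nan]), np.dtype("int64"))
# Traceback (most recent call last):
#     ...
# ValueError: Cannot convert non-finite values (NA or inf) to integer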
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
copy: bool = True,
):
"""
Try to coerce datetime, timedelta, and numeric object-dtype columns
to inferred dtype.
Parameters
----------
values : np.ndarray[object]
datetime : bool, default True
numeric: bool, default True
timedelta : bool, default True
copy : bool, default True
Returns
-------
np.ndarray
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
# Soft conversions
if datetime:
# GH 20380, when datetime is beyond year 2262, hence outside
# bound of nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values, convert_datetime=True)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=True)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
except (ValueError, TypeError):
pass
else:
# If all NaNs, then do not alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
return values
def convert_dtypes(
input_array: AnyArrayLike,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
) -> Dtype:
"""
Convert objects to best possible type, and optionally,
to types supporting ``pd.NA``.
Parameters
----------
input_array : ExtensionArray, Index, Series or np.ndarray
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtypes()``.
convert_floating : bool, default True
Whether, if possible, conversion can be done to floating extension types.
If `convert_integer` is also True, preference will be given to integer
dtypes if the floats can be faithfully cast to integers.
Returns
-------
dtype
new dtype
"""
is_extension = is_extension_array_dtype(input_array.dtype)
if (
convert_string or convert_integer or convert_boolean or convert_floating
) and not is_extension:
try:
inferred_dtype = lib.infer_dtype(input_array)
except ValueError:
# Required to catch due to Period. Can remove once GH 23553 is fixed
inferred_dtype = input_array.dtype
if not convert_string and is_string_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_integer:
target_int_dtype = "Int64"
if is_integer_dtype(input_array.dtype):
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
inferred_dtype = INT_STR_TO_DTYPE.get(
input_array.dtype.name, target_int_dtype
)
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
inferred_dtype = target_int_dtype
else:
if is_integer_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_floating:
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
inferred_float_dtype = FLOAT_STR_TO_DTYPE.get(
input_array.dtype.name, "Float64"
)
# if we could also convert to integer, check if all floats
# are actually integers
if convert_integer:
arr = input_array[notna(input_array)]
if (arr.astype(int) == arr).all():
inferred_dtype = "Int64"
else:
inferred_dtype = inferred_float_dtype
else:
inferred_dtype = inferred_float_dtype
else:
if is_float_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_boolean:
if is_bool_dtype(input_array.dtype):
inferred_dtype = "boolean"
else:
if isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
inferred_dtype = input_array.dtype
else:
inferred_dtype = input_array.dtype
return inferred_dtype
def maybe_castable(arr: np.ndarray) -> bool:
# return False to force a non-fastpath
assert isinstance(arr, np.ndarray) # GH 37024
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == "M":
return is_datetime64_ns_dtype(arr.dtype)
elif kind == "m":
return is_timedelta64_ns_dtype(arr.dtype)
return arr.dtype.name not in POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(
value: Union[ArrayLike, Scalar], convert_dates: bool = False
):
"""
we might have an array (or single object) that is datetime-like,
and no dtype was passed; don't change the value unless we find a
datetime/timedelta set
this is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls/string likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : bool, default False
if True try really hard to convert dates (such as datetime.date); otherwise
leave the inferred dtype 'date' alone
"""
# TODO: why not timedelta?
if isinstance(
value, (ABCDatetimeIndex, ABCPeriodIndex, ABCDatetimeArray, ABCPeriodArray)
):
return value
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if v.ndim != 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v, require_iso8601=True, errors="raise")[0]
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
from pandas import DatetimeIndex
try:
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize("UTC").tz_convert(tz=tz)
except (ValueError, TypeError):
pass
except Exception:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
td_values = to_timedelta(v)
except ValueError:
return v.reshape(shape)
else:
return np.asarray(td_values).reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
if inferred_type == "date" and convert_dates:
value = try_datetime(v)
elif inferred_type == "datetime":
value = try_datetime(v)
elif inferred_type == "timedelta":
value = try_timedelta(v)
elif inferred_type == "nat":
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value, skipna=False) in ["mixed"]:
# cannot skip missing values, as NaT implies that the string
# is actually a datetime
value = try_datetime(v)
return value
def maybe_cast_to_datetime(value, dtype: Optional[DtypeObj]):
"""
try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
if dtype is not None:
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# Force the dtype if needed.
msg = (
f"The '{dtype.name}' dtype has no unit. "
f"Please pass in '{dtype.name}[ns]' instead."
)
if is_datetime64:
# unpack e.g. SparseDtype
dtype = getattr(dtype, "subtype", dtype)
if not is_dtype_equal(dtype, DT64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("M8[ns]"):
if dtype.name == "datetime64":
raise ValueError(msg)
dtype = DT64NS_DTYPE
else:
raise TypeError(
f"cannot convert datetimelike to dtype [{dtype}]"
)
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isna(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("m8[ns]"):
if dtype.name == "timedelta64":
raise ValueError(msg)
dtype = TD64NS_DTYPE
else:
raise TypeError(f"cannot convert timedeltalike to dtype [{dtype}]")
if is_scalar(value):
if value == iNaT or isna(value):
value = iNaT
elif not is_sparse(value):
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype):
try:
if is_datetime64:
value = to_datetime(value, errors="raise")
# GH 25843: Remove tz information since the dtype
# didn't specify one
if value.tz is not None:
value = value.tz_localize(None)
value = value._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value.dtype)
value = to_datetime(value, errors="raise").array
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = value.tz_localize("UTC").tz_convert(dtype.tz)
elif is_timedelta64:
value = to_timedelta(value, errors="raise")._values
except OutOfBoundsDatetime:
raise
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(
getattr(value, "dtype", None)
) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != DT64NS_DTYPE:
value = value.astype(DT64NS_DTYPE)
ints = np.asarray(value).view("i8")
return ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError(f"Cannot cast datetime64 to {dtype}")
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ["M", "m"]:
dtype = value.dtype
if dtype.kind == "M" and dtype != DT64NS_DTYPE:
value = conversion.ensure_datetime64ns(value)
elif dtype.kind == "m" and dtype != TD64NS_DTYPE:
value = conversion.ensure_timedelta64ns(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif not (
is_array
and not (
issubclass(value.dtype.type, np.integer) or value.dtype == np.object_
)
):
value = maybe_infer_to_datetimelike(value)
return value
def find_common_type(types: List[DtypeObj]) -> DtypeObj:
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if len(types) == 0:
raise ValueError("no types given")
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
# get unique types (dict.fromkeys is used as order-preserving set())
types = list(dict.fromkeys(types).keys())
if any(isinstance(t, ExtensionDtype) for t in types):
for t in types:
if isinstance(t, ExtensionDtype):
res = t._get_common_dtype(types)
if res is not None:
return res
return np.dtype("object")
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype("datetime64[ns]")
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype("timedelta64[ns]")
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
for t in types:
if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):
return np.dtype("object")
return np.find_common_type(types, [])
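# Illustrative usage sketch (not part of the original module):
# >>> find_common_type([np.dtype("int64"), np.dtype("float32")])
# dtype('float64')
# >>> find_common_type([np.dtype("int64"), np.dtype("bool")])
# dtype('O')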
def cast_scalar_to_array(
shape: Shape, value: Scalar, dtype: Optional[DtypeObj] = None
) -> np.ndarray:
"""
Create np.ndarray of specified shape and dtype, filled with values.
Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
dtype to coerce
Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
"""
if dtype is None:
dtype, fill_value = infer_dtype_from_scalar(value)
else:
fill_value = value
values = np.empty(shape, dtype=dtype)
values.fill(fill_value)
return values
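# Illustrative usage sketch (not part of the original module):
# >>> cast_scalar_to_array((2, 3), 1.5)
# array([[1.5, 1.5, 1.5],
#        [1.5, 1.5, 1.5]])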
def construct_1d_arraylike_from_scalar(
value: Scalar, length: int, dtype: DtypeObj
) -> ArrayLike:
"""
create a np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype or np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if is_extension_array_dtype(dtype):
cls = dtype.construct_array_type()
subarr = cls._from_sequence([value] * length, dtype=dtype)
else:
if length and | is_integer_dtype(dtype) | pandas.core.dtypes.common.is_integer_dtype |
# -*- coding: utf-8 -*-
"""
Get input data from Excel files, and calculate epidemiological parameters
"""
import os
import numpy as np
import pandas as pd
import datetime as dt
from . import param_parser
from .get_initial_state import InitialModelState
from datetime import datetime
def aggregate_params_and_data(yaml_fp):
"""Aggregates all run parameters. Reads from a config YAML file
at `yaml_fp`, and calls SEIR_get_data to retrieve demographic data.
Returns a dictionary of aggregated parameters.
"""
config = param_parser.load(yaml_fp, validate=False)
# -------------Get data/params from get_data/params ----------------
# handling of legacy param names, formatted as:
# [old name which is still supported, new name]
legacy_conversions = tuple([
['sd_date', 'c_reduction_date'],
['DATA_FOLDER', 'data_folder'],
['CITY', 'city'],
])
for conversion in legacy_conversions:
old_name = conversion[0]
new_name = conversion[1]
if new_name not in config:
assert old_name in config, "config YAML has no field " + \
"`{}` (formerly known as `{}`)".format(new_name, old_name)
config[new_name] = config[old_name]
# get demographics, school calendar, and transmission data from Excel files
AgeGroupDict, metro_pop, school_calendar, \
time_begin, FallStartDate, Phi, symp_h_ratio_overall, \
symp_h_ratio, hosp_f_ratio = SEIR_get_data(config=config)
config.update({
"AgeGroupDict": AgeGroupDict,
'metro_pop': metro_pop,
'school_calendar': school_calendar,
'time_begin': time_begin,
'FallStartDate': FallStartDate,
'phi': Phi,
#initial_state': config['initial_state'],
'initial_i': config['I0'],
'symp_h_ratio_overall': symp_h_ratio_overall,
'symp_h_ratio': symp_h_ratio,
'hosp_f_ratio': hosp_f_ratio
})
# -------------Get initial state of model --------------------------
## -- get initial state of compartments
# todo: SEIR model should take a new arg "init_type" that explicitly states whether to initialize every compartment or just infected
# todo: currently the type of initialization is inferred from the instance type of "initial_i" -- that is sure to break at some point
init_state = InitialModelState(config['total_time'], config['interval_per_day'], config['n_age'], config['n_risk'],
config['I0'], metro_pop)
compartments = init_state.initialize()
# todo: more graceful and transparent override of user config specified start date
# todo: perhaps in param_parser we can check that time_begin_sim is None if I0 is a file path
if init_state.start_day:
print('Start date as specified in the config file is overridden by initialization from a deterministic solution.')
print('The new start date is {}'.format(init_state.start_day))
date_begin = init_state.start_day
config['time_begin_sim'] = datetime.strftime(date_begin, '%Y%m%d') # return datetime to its expected string format
# todo: we should re-save this config to reflect the updated start time
# ------------- Update config with revised initial conditions -------
config['initial_state'] = compartments
config['t_offset'] = init_state.offset
return config
def SEIR_get_data(config):
""" Gets input data from Excel files. Takes a configuration
dictionary `config` that must minimally contain the following keys:
:data_folder: str, path of Excel files
:city: str, name of city simulated
:n_age: int, number of age groups
:n_risk: int, number of risk groups
"""
# ingest from configuration dictionary
data_folder = config['data_folder']
city = config['city']
n_age = config['n_age']
n_risk = config['n_risk']
H_RELATIVE_RISK_IN_HIGH = config['H_RELATIVE_RISK_IN_HIGH']
D_RELATIVE_RISK_IN_HIGH = config['D_RELATIVE_RISK_IN_HIGH']
HIGH_RISK_RATIO = config['HIGH_RISK_RATIO']
H_FATALITY_RATIO = config['H_FATALITY_RATIO']
INFECTION_FATALITY_RATIO = config['INFECTION_FATALITY_RATIO']
OVERALL_H_RATIO = config['OVERALL_H_RATIO']
ASYMP_RATE = config['ASYMP_RATE']
age_group_dict = config['age_group_dict']
# ------------------------------
us_population_filename = 'US_pop_UN.csv'
population_filename = '{}_Population_{}_age_groups.csv'
population_filename_dict = {}
for key in age_group_dict.keys():
population_filename_dict[key] = population_filename.format(city, str(key))
school_calendar_filename = '{}_School_Calendar.csv'.format(city)
contact_matrix_all_filename_dict = {5: 'ContactMatrixAll_5AgeGroups.csv',
3: 'ContactMatrixAll_3AgeGroups.csv'}
contact_matrix_school_filename_dict = {5: 'ContactMatrixSchool_5AgeGroups.csv',
3: 'ContactMatrixSchool_3AgeGroups.csv'}
contact_matrix_work_filename_dict = {5: 'ContactMatrixWork_5AgeGroups.csv',
3: 'ContactMatrixWork_3AgeGroups.csv'}
contact_matrix_home_filename_dict = {5: 'ContactMatrixHome_5AgeGroups.csv',
3: 'ContactMatrixHome_3AgeGroups.csv'}
## Load data
# Population in US
df_US = pd.read_csv(data_folder + us_population_filename, index_col=False)
GroupPaperPop = df_US.groupby('GroupPaper')['Value'].sum().reset_index(name='GroupPaperPop')
GroupCOVIDPop = df_US.groupby('GroupCOVID')['Value'].sum().reset_index(name='GroupCOVIDPop')
df_US = pd.merge(df_US, GroupPaperPop)
df_US = pd.merge(df_US, GroupCOVIDPop)
# Calculate age specific and risk group specific symptomatic hospitalization ratio
df_US['Overall_H_Ratio'] = df_US['GroupPaper'].map(OVERALL_H_RATIO) / 100.
df_US['YHR_paper'] = df_US['Overall_H_Ratio'] / (1 - ASYMP_RATE)
df_US['YHN_1yr'] = df_US['YHR_paper'] * df_US['Value']
GroupCOVID_YHN = df_US.groupby('GroupCOVID')['YHN_1yr'].sum().reset_index(name='GroupCOVID_YHN')
df_US = pd.merge(df_US, GroupCOVID_YHN)
df_US['YHR'] = df_US['GroupCOVID_YHN'] / df_US['GroupCOVIDPop']
df_US['GroupCOVIDHighRiskRatio'] = df_US['GroupCOVID'].map(HIGH_RISK_RATIO) / 100.
df_US['YHR_low'] = df_US['YHR'] /(1 - df_US['GroupCOVIDHighRiskRatio'] + \
H_RELATIVE_RISK_IN_HIGH * df_US['GroupCOVIDHighRiskRatio'])
df_US['YHR_high'] = H_RELATIVE_RISK_IN_HIGH * df_US['YHR_low']
# Calculate age specific and risk group specific hospitalized fatality ratio
df_US['I_Fatality_Ratio'] = df_US['GroupPaper'].map(INFECTION_FATALITY_RATIO) / 100.
df_US['YFN_1yr'] = df_US['I_Fatality_Ratio'] * df_US['Value'] / (1 - ASYMP_RATE)
GroupCOVID_YFN = df_US.groupby('GroupCOVID')['YFN_1yr'].sum().reset_index(name='GroupCOVID_YFN')
df_US = pd.merge(df_US, GroupCOVID_YFN)
df_US['YFR'] = df_US['GroupCOVID_YFN'] / df_US['GroupCOVIDPop']
df_US['YFR_low'] = df_US['YFR'] / (1 - df_US['GroupCOVIDHighRiskRatio'] + \
D_RELATIVE_RISK_IN_HIGH * df_US['GroupCOVIDHighRiskRatio'])
df_US['YFR_high'] = D_RELATIVE_RISK_IN_HIGH * df_US['YFR_low']
df_US['HFR'] = df_US['YFR'] / df_US['YHR']
df_US['HFR_low'] = df_US['YFR_low'] / df_US['YHR_low']
df_US['HFR_high'] = df_US['YFR_high'] / df_US['YHR_high']
df_US_dict = df_US[['GroupCOVID', 'YHR', 'YHR_low', 'YHR_high', \
'HFR_low', 'HFR_high']].drop_duplicates().set_index('GroupCOVID').to_dict()
Symp_H_Ratio_dict = df_US_dict['YHR']
Symp_H_Ratio_L_dict = df_US_dict['YHR_low']
Symp_H_Ratio_H_dict = df_US_dict['YHR_high']
Hosp_F_Ratio_L_dict = df_US_dict['HFR_low']
Hosp_F_Ratio_H_dict = df_US_dict['HFR_high']
Symp_H_Ratio = np.array([Symp_H_Ratio_dict[i] for i in age_group_dict[n_age]])
Symp_H_Ratio_w_risk = np.array([[Symp_H_Ratio_L_dict[i] for i in age_group_dict[n_age]], \
[Symp_H_Ratio_H_dict[i] for i in age_group_dict[n_age]]])
Hosp_F_Ratio_w_risk = np.array([[Hosp_F_Ratio_L_dict[i] for i in age_group_dict[n_age]], \
[Hosp_F_Ratio_H_dict[i] for i in age_group_dict[n_age]]])
df = pd.read_csv(data_folder + population_filename_dict[n_age], index_col=False)
pop_metro = np.zeros(shape=(n_age, n_risk))
for r in range(n_risk):
pop_metro[:, r] = df.loc[df['RiskGroup'] == r, age_group_dict[n_age]].values.reshape(-1)
# Transmission adjustment multiplier per day and per metropolitan area
df_school_calendar = pd.read_csv(data_folder + school_calendar_filename, index_col=False)
school_calendar = df_school_calendar['Calendar'].values.reshape(-1)
school_calendar_start_date = dt.datetime.strptime(np.str(df_school_calendar['Date'][0]), '%m/%d/%y')
df_school_calendar_aug = df_school_calendar[df_school_calendar['Date'].str[0].astype(int) >= 8]
fall_start_date = df_school_calendar_aug[df_school_calendar_aug['Calendar'] == 1].Date.to_list()[0]
fall_start_date = '20200' + fall_start_date.split('/')[0] + fall_start_date.split('/')[1]
# Contact matrix
phi_all = | pd.read_csv(data_folder + contact_matrix_all_filename_dict[n_age], header=None) | pandas.read_csv |
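# --- Illustrative sketch (not part of the original module) -------------------
# The low/high risk split used above solves
#   YHR = (1 - p_high) * YHR_low + p_high * (RR * YHR_low)
# for YHR_low, with RR = H_RELATIVE_RISK_IN_HIGH and p_high the high-risk share.
# The numbers below are hypothetical, for illustration only:
YHR, p_high, RR = 0.04, 0.2, 10.0
YHR_low = YHR / (1 - p_high + RR * p_high)   # ~0.0143
YHR_high = RR * YHR_low                      # ~0.1429
# the population-weighted average recovers the overall ratio
assert abs((1 - p_high) * YHR_low + p_high * YHR_high - YHR) < 1e-12
# ------------------------------------------------------------------------------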
import pandas as pd
import json
import numpy as np
import matplotlib.pyplot as plt
import os
lis=os.listdir("graphs")
lis.sort(key=len)
train= | pd.DataFrame() | pandas.DataFrame |
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
import string
from typing import (
Any,
Callable,
ContextManager,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import zipfile
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
from pandas.io.common import urlopen
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return | pd.read_pickle(temp_path) | pandas.read_pickle |
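# Illustrative usage sketch (not part of the original module):
# >>> df = pd.DataFrame({"a": [1, 2, 3]})
# >>> result = round_trip_pickle(df)
# >>> assert_frame_equal(df, result)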
import sys
sys.path.extend(["../../", "../", "./"])
import warnings
warnings.filterwarnings("ignore")
from sklearn.preprocessing import MinMaxScaler
import gensim
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
import time
import argparse
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from gensim.models import word2vec
from sklearn.externals import joblib
# Split the fasta file(s) into words; the default is k-mer splitting
def save_wordfile(fastafile, splite, kmer):
train_words = []
for i in fastafile:
f = open(i)
k = kmer - 1
documents = f.readlines()
string = ""
flag = 0
for document in documents:
if document.startswith(">") and flag == 0:
flag = 1
continue
elif document.startswith(">") and flag == 1:
if splite == 0:
b = [string[i:i + kmer] for i in range(len(string)) if i < len(string) - k]
else:
b = [string[i:i + kmer] for i in range(0, len(string), kmer) if i < len(string) - k]
train_words.append(b)
string = ""
else:
string += document
string = string.strip()
if splite == 0:
b = [string[i:i + kmer] for i in range(len(string)) if i < len(string) - k]
else:
b = [string[i:i + kmer] for i in range(0, len(string), kmer) if i < len(string) - k]
train_words.append(b)
f.close()
return train_words
def splite_word(trainfasta_file, kmer, splite):
train_file = trainfasta_file
# train set transform to word
word = save_wordfile(train_file, splite, kmer)
return word
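# --- Illustrative sketch (not part of the original script) -------------------
# How the two splitting modes above behave for a toy sequence with kmer=3:
#   splite == 0 -> overlapping k-mers:     "ATCGA" -> ["ATC", "TCG", "CGA"]
#   splite != 0 -> non-overlapping k-mers: "ATCGA" -> ["ATC"]
# (the trailing incomplete k-mer "GA" is dropped in both modes)
# ------------------------------------------------------------------------------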
# Train the word vectors and turn each sequence into an averaged feature vector
def save_csv(words, model, b):
wv = model.wv
vocab_list = wv.index2word
feature = []
for word in words:
l = []
for i in word:
i = i.strip()
if i not in vocab_list:
flag = [b] * 100
else:
flag = model[i]
l.append(flag)
word_vec = np.array(l)
feature.append(np.mean(word_vec, axis=0))
return np.array(feature)
def tocsv(train_word, sg, hs, window, size, model_name, b, iter1, spmodel):
if spmodel:
print("loading model ......")
model = gensim.models.KeyedVectors.load_word2vec_format(spmodel, binary=False)
else:
model = word2vec.Word2Vec(train_word, iter=iter1, sg=sg, hs=hs, min_count=1, window=window, size=size)
model.wv.save_word2vec_format(model_name, binary=False)
csv = save_csv(train_word, model, b)
return csv
# svm
def svm(traincsv, train_y, cv, n_job, mms, ss, grad, model):
cv = cv
cpu_num = n_job
svc = SVC(probability=True)
X = traincsv
y = train_y
if mms:
print("MinMaxScaler")
minMax = MinMaxScaler()
minMax.fit(X)
X = minMax.transform(X)
if ss:
print("StandardScaler")
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
# Grid search
def get_bestparameter(X, y):
a = [2 ** x for x in range(-2, 5)]
b = [2 ** x for x in range(-5, 2)]
parameters = [
{
'C': a,
'gamma': b,
'kernel': ['rbf']
},
{
'C': a,
'kernel': ['linear']
}
]
clf = GridSearchCV(svc, parameters, cv=cv, scoring='accuracy', n_jobs=cpu_num)
clf.fit(X, y)
print("Best parameters set found on development set:")
print(clf.best_params_)
print(clf.best_score_)
return clf
if grad:
clf = get_bestparameter(X, y)
p = clf.best_params_
if clf.best_params_["kernel"] == "rbf":
clf = SVC(C=p["C"], kernel=p["kernel"], gamma=p["gamma"], probability=True)
else:
clf = SVC(C=p["C"], kernel=p["kernel"], probability=True)
else:
clf = SVC(C=0.5, gamma=0.05, probability=True)
if cv:
print("------------------------cv--------------------------")
predicted = cross_val_predict(clf, X, y, cv=cv, n_jobs=cpu_num)
# y_predict_prob = cross_val_predict(clf, X, y, cv=cv, n_jobs=cpu_num, method='predict_proba')
# ROC_AUC_area = metrics.roc_auc_score(y, y_predict_prob[:, 1])
# print("AUC:{}".format(ROC_AUC_area))
print("ACC:{}".format(metrics.accuracy_score(y, predicted)))
print("MCC:{}\n".format(metrics.matthews_corrcoef(y, predicted)))
print(classification_report(y, predicted))
print("confusion matrix\n")
print(pd.crosstab( | pd.Series(y, name='Actual') | pandas.Series |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": | pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]) | pandas.Series |
import pandas as pd
import numpy as np
import datetime as dt
import math
# Inputs: H (holding period in days) and the source dataframe/file
def cal_riskrt(H,source):
source=source.iloc[:,0:6]
source=source.drop(columns=["Unnamed: 0"])
source=source.set_index('date').dropna(subset=['long_rt','short_rt','long_short_rt'],how='all')
# create a dataframe to record the various metrics
df=pd.DataFrame(columns=['rt','volatility','mdd','sharpe','calmar'],index=['long','short','long_short','excess'])
# compute the metrics for the long portfolio
rt=pd.DataFrame(source['long_rt'])
rt['prod'] = np.cumprod(rt['long_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
# annualized return
annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
    # # Annualized volatility
volatility = rt['long_rt'].std() * (math.sqrt(250 / H))
# #sharpe
sharpe = annual_ret / volatility
    # # Maximum drawdown
rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['long','rt']=annual_ret
df.loc['long','volatility']=volatility
df.loc['long','mdd']=mdd
df.loc['long','sharpe']=sharpe
df.loc['long','calmar']=calmar
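    # Recap of the metric definitions used above and below (comments added for clarity;
    # H is the period length in days passed to cal_riskrt):
    #   annualized return     = (cumulative product of (1 + r)) ** (365 / holding days) - 1
    #   annualized volatility = std(period returns) * sqrt(250 / H)
    #   Sharpe ratio          = annualized return / annualized volatility (risk-free rate taken as 0)
    #   Calmar ratio          = annualized return / |maximum drawdown|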
    # Compute the metrics for the short portfolio (control group)
rt = pd.DataFrame(source['short_rt'])
rt['short_rt']=rt['short_rt']
rt['prod'] = np.cumprod(rt['short_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
    # # Annualized return
annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
    # # Annualized volatility
volatility = rt['short_rt'].std() * (math.sqrt(250 / H))
# #sharpe
sharpe = annual_ret / volatility
    # # Maximum drawdown
rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['short', 'rt'] = annual_ret
df.loc['short', 'volatility'] = volatility
df.loc['short', 'mdd'] = mdd
df.loc['short', 'sharpe'] = sharpe
df.loc['short', 'calmar'] = calmar
    # Compute the metrics for the long-short portfolio
rt = | pd.DataFrame(source['long_short_rt']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
import matplotlib.cm as cm
import os
Path_save = '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/'
Horizonte = 'Anio' ##--> 'Anio' for the 2018 and 2019 data, 'EXP' for the data collected from the experiment onwards.
#------------------------------------------------------------------------------
# Code motivation -------------------------------------------------------------
'Code to determine the frequency and also the fractal dimension (in order to reveal relationships'
'between both concepts). The time horizon to work with is defined in the input above.'
'It also produces the scatter plot that relates the reflectances to the radiation anomalies.'
#-----------------------------------------------------------------------------
# Paths for the fonts --------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################################
## ---------------- READING THE RADIATION ANOMALY DATA --------------- ##
##########################################################################################
Anomal_df_975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix975_2018_2019.csv', sep=',')
Anomal_df_348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix348_2018_2019.csv', sep=',')
Anomal_df_350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix350_2018_2019.csv', sep=',')
Anomal_df_975['fecha_hora'] = pd.to_datetime(Anomal_df_975['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Anomal_df_975.index = Anomal_df_975['fecha_hora']
Anomal_df_975 = Anomal_df_975.drop(['fecha_hora'], axis=1)
Anomal_df_975 = Anomal_df_975.between_time('06:00', '18:00') ##--> Keep only the daytime hours
Anomal_df_975_h = Anomal_df_975.groupby(pd.Grouper(freq="H")).mean()
Anomal_df_350['fecha_hora'] = pd.to_datetime(Anomal_df_350['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Anomal_df_350.index = Anomal_df_350['fecha_hora']
Anomal_df_350 = Anomal_df_350.drop(['fecha_hora'], axis=1)
Anomal_df_350 = Anomal_df_350.between_time('06:00', '18:00') ##--> Keep only the daytime hours
Anomal_df_350_h = Anomal_df_350.groupby(pd.Grouper(freq="H")).mean()
Anomal_df_348['fecha_hora'] = pd.to_datetime(Anomal_df_348['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Anomal_df_348.index = Anomal_df_348['fecha_hora']
Anomal_df_348 = Anomal_df_348.drop(['fecha_hora'], axis=1)
Anomal_df_348 = Anomal_df_348.between_time('06:00', '18:00') ##--> Keep only the daytime hours
Anomal_df_348_h = Anomal_df_348.groupby(pd.Grouper(freq="H")).mean()
Anomal_df_348_h = Anomal_df_348_h.drop(['Radiacion_Med', 'radiacion',], axis=1)
Anomal_df_350_h = Anomal_df_350_h.drop(['Radiacion_Med', 'radiacion',], axis=1)
Anomal_df_975_h = Anomal_df_975_h.drop(['Radiacion_Med', 'radiacion',], axis=1)
Anomal_df_348_h = Anomal_df_348_h.loc[~Anomal_df_348_h.index.duplicated(keep='first')]
Anomal_df_350_h = Anomal_df_350_h.loc[~Anomal_df_350_h.index.duplicated(keep='first')]
Anomal_df_975_h = Anomal_df_975_h.loc[~Anomal_df_975_h.index.duplicated(keep='first')]
################################################################################################
## ------------------------------- REFLECTANCE THRESHOLDS ------------------------------ ##
################################################################################################
Umbral_up_348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_348_Nuba.csv', sep=',', header = None)
Umbral_down_348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_348_Desp.csv', sep=',', header = None)
Umbral_up_348.columns=['Hora', 'Umbral']
Umbral_up_348.index = Umbral_up_348['Hora']
Umbral_up_348 = Umbral_up_348.drop(['Hora'], axis=1)
Umbral_down_348.columns=['Hora', 'Umbral']
Umbral_down_348.index = Umbral_down_348['Hora']
Umbral_down_348 = Umbral_down_348.drop(['Hora'], axis=1)
Umbral_up_350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_350_Nuba.csv', sep=',', header = None)
Umbral_down_350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_350_Desp.csv', sep=',', header = None)
Umbral_up_350.columns=['Hora', 'Umbral']
Umbral_up_350.index = Umbral_up_350['Hora']
Umbral_up_350 = Umbral_up_350.drop(['Hora'], axis=1)
Umbral_down_350.columns=['Hora', 'Umbral']
Umbral_down_350.index = Umbral_down_350['Hora']
Umbral_down_350 = Umbral_down_350.drop(['Hora'], axis=1)
Umbral_up_975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_975_Nuba.csv', sep=',', header = None)
Umbral_down_975 = | pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_975_Desp.csv', sep=',', header = None) | pandas.read_table |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex,
to_datetime, date_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(tm.TestCase, TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O')).values
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
self.assertEqual(len(shiftedFrame), len(self.tsframe))
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
self.assert_index_equal(shifted.index, ps.index)
self.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].valid().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
self.assertRaises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
self.assertFalse((self.tsframe.values[5:11] == 5).any())
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
self.assertIsNot(result, zero_length)
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
tm.assertIsInstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
tm.assertIsInstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
def test_first_last_valid(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
self.assertEqual(index, frame.index[5])
index = frame.last_valid_index()
self.assertEqual(index, frame.index[-6])
# GH12800
empty = DataFrame()
self.assertIsNone(empty.last_valid_index())
self.assertIsNone(empty.first_valid_index())
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
pd.Timestamp('2012-05-01')]})
res = df.min()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([ | pd.Timestamp('2012-05-01') | pandas.Timestamp |
import numpy as np
import pandas as pd
def find_null_columns(df):
"""
Return a list of columns with null values
Args:
df - dataframe - Dataframe to check columns of
Returns:
list of null columns
"""
return df.columns[df.isnull().any()].tolist()
def null_column_report(df, total=True, percent=True, ):
"""
    Print each column of a dataframe that contains null values, along with the total and percent null
Args:
df - pd dataframe
total - boolean - Flag to indicate whether to print total null records per column
percent - boolean - Flag to indicate whether to print percent of column that is null
Returns:
None
"""
null_columns = find_null_columns(df)
for col in null_columns:
total_null_records = df[col].isnull().sum()
print('Column:')
print(col)
if total:
print('Total Nulls:')
print(total_null_records)
if percent:
print('Percent Null:')
print(round(total_null_records/df.shape[0], 2))
print()
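# Illustrative usage sketch (the toy DataFrame below is hypothetical and not part of
# this module):
#   df = pd.DataFrame({'a': [1.0, None, 3.0], 'b': ['x', 'y', None]})
#   find_null_columns(df)   # -> ['a', 'b']
#   null_column_report(df)  # prints each null column with its total and percent null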
def column_comparison(series, col1, col2, comparison='equal', pos_return_val=1, neg_return_val=0):
"""
Apply to a dataframe row to return a binary feature depending on equality or inequality
    E.g. df.apply(lambda s: column_comparison(s, 'day_of_week', 'day_of_sale'), axis=1) to check that the two match.
    Result is a series of pos_return_val and neg_return_val values (defaults to 1 and 0).
"""
if comparison == 'equal':
if series[col1] == series[col2]:
return pos_return_val
else:
return neg_return_val
if comparison == 'inequal':
if series[col1] != series[col2]:
return pos_return_val
else:
return neg_return_val
def dummies_from_bins(df, col, bins, bin_labels, col_prefix):
"""
Given a dataframe and column to create binary features from bins, return dummy columns of said bins
concatenated onto the end of the df
"""
# cut the column values into bins. the labels provided are the returned values
# bins must increase monotonically
binned_values = pd.cut(df[col],
bins=bins,
labels=bin_labels)
# Create dummy variables and add prefix to col label
dummies_cols = pd.get_dummies(binned_values).add_prefix(col_prefix)
# Concatenate onto end of original df
df = pd.concat([df, dummies_cols], axis=1)
return df
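# Illustrative usage sketch (column name, bin edges and prefix below are hypothetical):
#   df = pd.DataFrame({'age': [5, 25, 60]})
#   dummies_from_bins(df, 'age', bins=[0, 18, 40, 100],
#                     bin_labels=['child', 'adult', 'senior'], col_prefix='age_')
#   # -> df with added dummy columns age_child, age_adult, age_senior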
def bin_apply(s, feature_col, min_val, max_val,binary=False):
"""
    Apply to a pd df with axis=1 to evaluate row values and return the value or a binary response.
    If binary=True, response values are 1 if the value falls within [min_val, max_val], 0 otherwise.
    Else returns the original value inside the range or a NaN outside it.
    E.g.:
    df.apply(lambda s: bin_apply(s, 'hazard_rank', 0, 3, binary=True), axis=1) to create a binary feature that returns
    1 if hazard rank is between 0-3 and 0 otherwise
"""
if (s[feature_col] >= min_val) & (s[feature_col] <= max_val):
if binary:
return 1
else:
return s[feature_col]
else:
if binary:
return 0
else:
return np.nan
def bin_df_feature(df, feature_col, min_val, max_val, binary=False):
"""
Given a dataframe, feature column (series), bin edges, return a new series whose values are those that fit within the
bin edges. Optionally denote if binary response (1 if present, 0 otherwise)
"""
if binary:
return df.apply(lambda s: bin_apply(s, feature_col, min_val, max_val, binary=True), axis=1)
else:
return df.apply(lambda s: bin_apply(s, feature_col, min_val, max_val, binary=False), axis=1)
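# Illustrative usage sketch (column name and bin edges are hypothetical):
#   df['is_low_hazard'] = bin_df_feature(df, 'hazard_rank', 0, 3, binary=True)
#   # 1 where 0 <= hazard_rank <= 3, 0 otherwise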
def binary_feature(df, feat_col, value, binary_feature_col_name=None, concat=False):
"""
    Given a dataframe, feature column name and value to check, return a series of binary responses:
    1 if the value in the feature column matches the given value, 0 otherwise.
    Pass concat=True to return the original dataframe with the new binary column appended.
"""
# If binary_feature_col_name is none use this instead
if not binary_feature_col_name:
binary_feature_col_name = feat_col+'_is_'+str(value)
def is_value_present(s, value):
"""
Given a series and a value, return a binary feature 1 if present and 0 if otherwise
"""
if s[feat_col] == value:
return 1
else:
return 0
# Return binary feature series
binary_feature = df.apply(lambda s: is_value_present(s, value), axis=1)
# Set series name
binary_feature.name = binary_feature_col_name
if concat:
return pd.concat([df, binary_feature], axis=1)
return binary_feature
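# Illustrative usage sketch (column and value are hypothetical):
#   binary_feature(df, 'color', 'red')               # Series named 'color_is_red'
#   binary_feature(df, 'color', 'red', concat=True)  # original df plus the new column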
def scale_feature(df, feat_col, scale, value, scaled_feature_col_name=None, concat=False):
"""
Given a dataframe, feature column name and value to check, return a scaled response
If the value is present, multiply it by the scale multiplier. Can be used to increase or decrease
importance of binary features
"""
    # If scaled_feature_col_name is None use this instead
if not scaled_feature_col_name:
scaled_feature_col_name = feat_col+'_weighted'
def scale_value(s, value):
"""
        Given a series and a value, return the feature multiplied by scale if it equals the value, otherwise the original value
"""
if s[feat_col] == value:
return s[feat_col] * scale
else:
return s[feat_col]
# Return weighted feature series
scaled_feature = df.apply(lambda s: scale_value(s, value), axis=1)
# Set series name
    scaled_feature.name = scaled_feature_col_name
if concat:
return pd.concat([df, scaled_feature], axis=1)
return scaled_feature
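# Illustrative usage sketch (column, value and scale are hypothetical):
#   scale_feature(df, 'is_weekend', scale=2.0, value=1)
#   # rows where is_weekend == 1 are multiplied by 2.0; other rows are returned unchanged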
def pivot_df_to_row(df, col_pivot_out=None):
""" Take a dataframe and pivot out so that all data is in one row. Columns prefixed with original column name"""
"""
Parameters
----------
df : dataframe
A dataframe
col_pivot_out : list of strings
The columns to pivot out and return
Returns
-------
    dataframe
"Wide" dataframe with columns in format old_col_name_index_name
E.g. Polarity
Count 1
Would be Polarity_Count with associated value of 1 in the new row
"""
# If no columns specified, use all
if not col_pivot_out:
col_pivot_out = df.columns.values.tolist()
new_cols = []
for column in col_pivot_out:
new_cols.append(df.T.loc[column].add_prefix(column+'_'))
pivoted_df = pd.DataFrame( | pd.concat(new_cols) | pandas.concat |
"""
Tax-Calculator tax-filing-unit Records class.
"""
# CODING-STYLE CHECKS:
# pycodestyle records.py
# pylint --disable=locally-disabled records.py
import os
import json
import six
import numpy as np
import pandas as pd
from taxcalc.growfactors import GrowFactors
from taxcalc.utils import read_egg_csv, read_egg_json
class Records(object):
"""
Constructor for the tax-filing-unit Records class.
Parameters
----------
data: string or Pandas DataFrame
string describes CSV file in which records data reside;
DataFrame already contains records data;
default value is the string 'puf.csv'
For details on how to use your own data with the Tax-Calculator,
look at the test_Calculator_using_nonstd_input() function in the
tests/test_calculate.py file.
exact_calculations: boolean
specifies whether or not exact tax calculations are done without
any smoothing of "stair-step" provisions in income tax law;
default value is false.
gfactors: GrowFactors class instance or None
containing record data extrapolation (or "blowup") factors.
NOTE: the constructor should never call the _blowup() method.
weights: string or Pandas DataFrame or None
string describes CSV file in which weights reside;
DataFrame already contains weights;
None creates empty sample-weights DataFrame;
default value is filename of the PUF weights.
adjust_ratios: string or Pandas DataFrame or None
string describes CSV file in which adjustment ratios reside;
DataFrame already contains adjustment ratios;
None creates empty adjustment-ratios DataFrame;
default value is filename of the PUF adjustment ratios.
start_year: integer
specifies calendar year of the input data;
default value is PUFCSV_YEAR.
Note that if specifying your own data (see above) as being a custom
data set, be sure to explicitly set start_year to the
custom data's calendar year. For details on how to
use your own data with the Tax-Calculator, read the
DATAPREP.md file in the top-level directory and then
look at the test_Calculator_using_nonstd_input()
function in the taxcalc/tests/test_calculate.py file.
Raises
------
ValueError:
if data is not the appropriate type.
if taxpayer and spouse variables do not add up to filing-unit total.
if dividends is less than qualified dividends.
if gfactors is not None or a GrowFactors class instance.
if start_year is not an integer.
if files cannot be found.
Returns
-------
class instance: Records
Notes
-----
Typical usage when using PUF input data is as follows::
recs = Records()
which uses all the default parameters of the constructor, and
therefore, imputed variables are generated to augment the data and
initial-year grow factors are applied to the data. There are
situations in which you need to specify the values of the Record
constructor's arguments, but be sure you know exactly what you are
doing when attempting this.
Use Records.cps_constructor() to get a Records object instantiated
with CPS input data.
"""
# suppress pylint warnings about unrecognized Records variables:
# pylint: disable=no-member
# suppress pylint warnings about uppercase variable names:
# pylint: disable=invalid-name
# suppress pylint warnings about too many class instance attributes:
# pylint: disable=too-many-instance-attributes
PUFCSV_YEAR = 2011
CPSCSV_YEAR = 2014
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
PUF_WEIGHTS_FILENAME = 'puf_weights.csv.gz'
PUF_RATIOS_FILENAME = 'puf_ratios.csv'
CPS_WEIGHTS_FILENAME = 'cps_weights.csv.gz'
CPS_RATIOS_FILENAME = None
VAR_INFO_FILENAME = 'records_variables.json'
CPS_BENEFITS_FILENAME = 'cps_benefits.csv.gz'
def __init__(self,
data='puf.csv',
exact_calculations=False,
gfactors=GrowFactors(),
weights=PUF_WEIGHTS_FILENAME,
adjust_ratios=PUF_RATIOS_FILENAME,
benefits=None,
start_year=PUFCSV_YEAR):
# pylint: disable=too-many-arguments,too-many-locals
self.__data_year = start_year
# read specified data
self._read_data(data, exact_calculations, (benefits is None))
# check that three sets of split-earnings variables have valid values
msg = 'expression "{0} == {0}p + {0}s" is not true for every record'
tol = 0.020001 # handles "%.2f" rounding errors
if not np.allclose(self.e00200, (self.e00200p + self.e00200s),
rtol=0.0, atol=tol):
raise ValueError(msg.format('e00200'))
if not np.allclose(self.e00900, (self.e00900p + self.e00900s),
rtol=0.0, atol=tol):
raise ValueError(msg.format('e00900'))
if not np.allclose(self.e02100, (self.e02100p + self.e02100s),
rtol=0.0, atol=tol):
raise ValueError(msg.format('e02100'))
# check that ordinary dividends are no less than qualified dividends
other_dividends = np.maximum(0., self.e00600 - self.e00650)
if not np.allclose(self.e00600, self.e00650 + other_dividends,
rtol=0.0, atol=tol):
msg = 'expression "e00600 >= e00650" is not true for every record'
raise ValueError(msg)
del other_dividends
# check that total pension income is no less than taxable pension inc
nontaxable_pensions = np.maximum(0., self.e01500 - self.e01700)
if not np.allclose(self.e01500, self.e01700 + nontaxable_pensions,
rtol=0.0, atol=tol):
msg = 'expression "e01500 >= e01700" is not true for every record'
raise ValueError(msg)
del nontaxable_pensions
# handle grow factors
is_correct_type = isinstance(gfactors, GrowFactors)
if gfactors is not None and not is_correct_type:
msg = 'gfactors is neither None nor a GrowFactors instance'
raise ValueError(msg)
self.gfactors = gfactors
# read sample weights
self.WT = None
self._read_weights(weights)
self.ADJ = None
self._read_ratios(adjust_ratios)
# read extrapolated benefit variables
self.BEN = None
self._read_benefits(benefits)
# weights must be same size as tax record data
if self.WT.size > 0 and self.array_length != len(self.WT.index):
# scale-up sub-sample weights by year-specific factor
sum_full_weights = self.WT.sum()
self.WT = self.WT.iloc[self.__index]
sum_sub_weights = self.WT.sum()
factor = sum_full_weights / sum_sub_weights
self.WT *= factor
# specify current_year and FLPDYR values
if isinstance(start_year, int):
self.__current_year = start_year
self.FLPDYR.fill(start_year)
else:
msg = 'start_year is not an integer'
raise ValueError(msg)
# construct sample weights for current_year
if self.WT.size > 0:
wt_colname = 'WT{}'.format(self.current_year)
if wt_colname in self.WT.columns:
self.s006 = self.WT[wt_colname] * 0.01
# specify that variable values do not include behavioral responses
self.behavioral_responses_are_included = False
@staticmethod
def cps_constructor(data=None,
no_benefits=False,
exact_calculations=False,
gfactors=GrowFactors()):
"""
Static method returns a Records object instantiated with CPS
input data. This works in a analogous way to Records(), which
returns a Records object instantiated with PUF input data.
This is a convenience method that eliminates the need to
specify all the details of the CPS input data just as the
default values of the arguments of the Records class constructor
eliminate the need to specify all the details of the PUF input
data.
"""
if data is None:
data = os.path.join(Records.CUR_PATH, 'cps.csv.gz')
if no_benefits:
benefits_filename = None
else:
benefits_filename = Records.CPS_BENEFITS_FILENAME
return Records(data=data,
exact_calculations=exact_calculations,
gfactors=gfactors,
weights=Records.CPS_WEIGHTS_FILENAME,
adjust_ratios=Records.CPS_RATIOS_FILENAME,
benefits=benefits_filename,
start_year=Records.CPSCSV_YEAR)
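    # Illustrative usage sketch (not part of the original source): with all defaults
    # this reads the bundled cps.csv.gz, e.g.
    #   recs = Records.cps_constructor()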
@property
def data_year(self):
"""
Records class original data year property.
"""
return self.__data_year
@property
def current_year(self):
"""
Records class current calendar year property.
"""
return self.__current_year
@property
def array_length(self):
"""
Length of arrays in Records class's DataFrame.
"""
return self.__dim
def increment_year(self):
"""
Add one to current year.
Also, does extrapolation, reweighting, adjusting for new current year.
"""
# no incrementing Records object that includes behavioral responses
assert self.behavioral_responses_are_included is False
# move to next year
self.__current_year += 1
# apply variable extrapolation grow factors
if self.gfactors is not None:
self._blowup(self.__current_year)
# apply variable adjustment ratios
self._adjust(self.__current_year)
# specify current-year sample weights
if self.WT.size > 0:
wt_colname = 'WT{}'.format(self.__current_year)
self.s006 = self.WT[wt_colname] * 0.01
# extrapolate benefit values
if self.BEN.size > 0:
self._extrapolate_benefits(self.current_year)
def set_current_year(self, new_current_year):
"""
Set current year to specified value and updates FLPDYR variable.
Unlike increment_year method, extrapolation, reweighting, adjusting
are skipped.
"""
self.__current_year = new_current_year
self.FLPDYR.fill(new_current_year)
@staticmethod
def read_var_info():
"""
Read Records variables metadata from JSON file;
returns dictionary and specifies static varname sets listed below.
"""
var_info_path = os.path.join(Records.CUR_PATH,
Records.VAR_INFO_FILENAME)
if os.path.exists(var_info_path):
with open(var_info_path) as vfile:
vardict = json.load(vfile)
else:
# cannot call read_egg_ function in unit tests
vardict = read_egg_json(
Records.VAR_INFO_FILENAME) # pragma: no cover
Records.INTEGER_READ_VARS = set(k for k, v in vardict['read'].items()
if v['type'] == 'int')
FLOAT_READ_VARS = set(k for k, v in vardict['read'].items()
if v['type'] == 'float')
Records.MUST_READ_VARS = set(k for k, v in vardict['read'].items()
if v.get('required'))
Records.USABLE_READ_VARS = Records.INTEGER_READ_VARS | FLOAT_READ_VARS
INT_CALCULATED_VARS = set(k for k, v in vardict['calc'].items()
if v['type'] == 'int')
FLOAT_CALCULATED_VARS = set(k for k, v in vardict['calc'].items()
if v['type'] == 'float')
FIXED_CALCULATED_VARS = set(k for k, v in vardict['calc'].items()
if v['type'] == 'unchanging_float')
Records.CALCULATED_VARS = (INT_CALCULATED_VARS |
FLOAT_CALCULATED_VARS |
FIXED_CALCULATED_VARS)
Records.CHANGING_CALCULATED_VARS = FLOAT_CALCULATED_VARS
Records.INTEGER_VARS = Records.INTEGER_READ_VARS | INT_CALCULATED_VARS
return vardict
# specify various sets of variable names
INTEGER_READ_VARS = None
MUST_READ_VARS = None
USABLE_READ_VARS = None
CALCULATED_VARS = None
CHANGING_CALCULATED_VARS = None
INTEGER_VARS = None
# ----- begin private methods of Records class -----
def _blowup(self, year):
"""
Apply to variables the grow factors for specified calendar year.
"""
# pylint: disable=too-many-locals,too-many-statements
AWAGE = self.gfactors.factor_value('AWAGE', year)
AINTS = self.gfactors.factor_value('AINTS', year)
ADIVS = self.gfactors.factor_value('ADIVS', year)
ATXPY = self.gfactors.factor_value('ATXPY', year)
ASCHCI = self.gfactors.factor_value('ASCHCI', year)
ASCHCL = self.gfactors.factor_value('ASCHCL', year)
ACGNS = self.gfactors.factor_value('ACGNS', year)
ASCHEI = self.gfactors.factor_value('ASCHEI', year)
ASCHEL = self.gfactors.factor_value('ASCHEL', year)
ASCHF = self.gfactors.factor_value('ASCHF', year)
AUCOMP = self.gfactors.factor_value('AUCOMP', year)
ASOCSEC = self.gfactors.factor_value('ASOCSEC', year)
ACPIM = self.gfactors.factor_value('ACPIM', year)
ABOOK = self.gfactors.factor_value('ABOOK', year)
AIPD = self.gfactors.factor_value('AIPD', year)
self.e00200 *= AWAGE
self.e00200p *= AWAGE
self.e00200s *= AWAGE
self.e00300 *= AINTS
self.e00400 *= AINTS
self.e00600 *= ADIVS
self.e00650 *= ADIVS
self.e00700 *= ATXPY
self.e00800 *= ATXPY
self.e00900s[:] = np.where(self.e00900s >= 0,
self.e00900s * ASCHCI,
self.e00900s * ASCHCL)
self.e00900p[:] = np.where(self.e00900p >= 0,
self.e00900p * ASCHCI,
self.e00900p * ASCHCL)
self.e00900[:] = self.e00900p + self.e00900s
self.e01100 *= ACGNS
self.e01200 *= ACGNS
self.e01400 *= ATXPY
self.e01500 *= ATXPY
self.e01700 *= ATXPY
self.e02000[:] = np.where(self.e02000 >= 0,
self.e02000 * ASCHEI,
self.e02000 * ASCHEL)
self.e02100 *= ASCHF
self.e02100p *= ASCHF
self.e02100s *= ASCHF
self.e02300 *= AUCOMP
self.e02400 *= ASOCSEC
self.e03150 *= ATXPY
self.e03210 *= ATXPY
self.e03220 *= ATXPY
self.e03230 *= ATXPY
self.e03270 *= ACPIM
self.e03240 *= ATXPY
self.e03290 *= ACPIM
self.e03300 *= ATXPY
self.e03400 *= ATXPY
self.e03500 *= ATXPY
self.e07240 *= ATXPY
self.e07260 *= ATXPY
self.e07300 *= ABOOK
self.e07400 *= ABOOK
self.p08000 *= ATXPY
self.e09700 *= ATXPY
self.e09800 *= ATXPY
self.e09900 *= ATXPY
self.e11200 *= ATXPY
# ITEMIZED DEDUCTIONS
self.e17500 *= ACPIM
self.e18400 *= ATXPY
self.e18500 *= ATXPY
self.e19200 *= AIPD
self.e19800 *= ATXPY
self.e20100 *= ATXPY
self.e20400 *= ATXPY
self.g20500 *= ATXPY
# CAPITAL GAINS
self.p22250 *= ACGNS
self.p23250 *= ACGNS
self.e24515 *= ACGNS
self.e24518 *= ACGNS
# SCHEDULE E
self.e26270 *= ASCHEI
self.e27200 *= ASCHEI
self.k1bx14p *= ASCHEI
self.k1bx14s *= ASCHEI
# MISCELLANOUS SCHEDULES
self.e07600 *= ATXPY
self.e32800 *= ATXPY
self.e58990 *= ATXPY
self.e62900 *= ATXPY
self.e87530 *= ATXPY
self.e87521 *= ATXPY
self.cmbtp *= ATXPY
def _adjust(self, year):
"""
Adjust value of income variables to match SOI distributions
Note: adjustment must leave variables as numpy.ndarray type
"""
if self.ADJ.size > 0:
# Interest income
self.e00300 *= self.ADJ['INT{}'.format(year)][self.agi_bin].values
def _extrapolate_benefits(self, year):
"""
Extrapolate benefit variables
"""
setattr(self, 'housing_ben', self.BEN['housing_{}'.format(year)])
setattr(self, 'ssi_ben', self.BEN['ssi_{}'.format(year)])
setattr(self, 'snap_ben', self.BEN['snap_{}'.format(year)])
setattr(self, 'tanf_ben', self.BEN['tanf_{}'.format(year)])
setattr(self, 'vet_ben', self.BEN['vet_{}'.format(year)])
setattr(self, 'wic_ben', self.BEN['wic_{}'.format(year)])
setattr(self, 'mcare_ben', self.BEN['mcare_{}'.format(year)])
setattr(self, 'mcaid_ben', self.BEN['mcaid_{}'.format(year)])
self.other_ben *= self.gfactors.factor_value('ABENEFITS', year)
def _read_data(self, data, exact_calcs, no_benefits):
"""
Read Records data from file or use specified DataFrame as data.
Specifies exact array depending on boolean value of exact_calcs.
Set benefits to zero if no_benefits is True; otherwise do nothing.
"""
# pylint: disable=too-many-statements,too-many-branches
if Records.INTEGER_VARS is None:
Records.read_var_info()
# read specified data
if isinstance(data, pd.DataFrame):
taxdf = data
elif isinstance(data, six.string_types):
if os.path.isfile(data):
taxdf = pd.read_csv(data)
else:
# cannot call read_egg_ function in unit tests
taxdf = read_egg_csv(data) # pragma: no cover
else:
msg = 'data is neither a string nor a Pandas DataFrame'
raise ValueError(msg)
self.__dim = len(taxdf.index)
self.__index = taxdf.index
# create class variables using taxdf column names
READ_VARS = set()
self.IGNORED_VARS = set()
for varname in list(taxdf.columns.values):
if varname in Records.USABLE_READ_VARS:
READ_VARS.add(varname)
if varname in Records.INTEGER_READ_VARS:
setattr(self, varname,
taxdf[varname].astype(np.int32).values)
else:
setattr(self, varname,
taxdf[varname].astype(np.float64).values)
else:
self.IGNORED_VARS.add(varname)
# check that MUST_READ_VARS are all present in taxdf
if not Records.MUST_READ_VARS.issubset(READ_VARS):
msg = 'Records data missing one or more MUST_READ_VARS'
raise ValueError(msg)
# delete intermediate taxdf object
del taxdf
# create other class variables that are set to all zeros
UNREAD_VARS = Records.USABLE_READ_VARS - READ_VARS
ZEROED_VARS = Records.CALCULATED_VARS | UNREAD_VARS
for varname in ZEROED_VARS:
if varname in Records.INTEGER_VARS:
setattr(self, varname,
np.zeros(self.array_length, dtype=np.int32))
else:
setattr(self, varname,
np.zeros(self.array_length, dtype=np.float64))
# check for valid MARS values
if not np.all(np.logical_and(np.greater_equal(self.MARS, 1),
np.less_equal(self.MARS, 5))):
raise ValueError('not all MARS values in [1,5] range')
# create variables derived from MARS, which is in MUST_READ_VARS
self.num[:] = np.where(self.MARS == 2, 2, 1)
self.sep[:] = np.where(self.MARS == 3, 2, 1)
# check for valid EIC values
if not np.all(np.logical_and(np.greater_equal(self.EIC, 0),
np.less_equal(self.EIC, 3))):
raise ValueError('not all EIC values in [0,3] range')
# specify value of exact array
self.exact[:] = np.where(exact_calcs is True, 1, 0)
# optionally set benefits to zero
if no_benefits:
self.housing_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.ssi_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.snap_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.tanf_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.vet_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.wic_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.mcare_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.mcaid_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.other_ben[:] = np.zeros(self.array_length, dtype=np.float64)
# delete intermediate variables
del READ_VARS
del UNREAD_VARS
del ZEROED_VARS
def zero_out_changing_calculated_vars(self):
"""
Set to zero all variables in the Records.CHANGING_CALCULATED_VARS set.
"""
for varname in Records.CHANGING_CALCULATED_VARS:
var = getattr(self, varname)
var.fill(0.)
del var
def _read_weights(self, weights):
"""
Read Records weights from file or
use specified DataFrame as data or
create empty DataFrame if None.
Assumes weights are integers equal to 100 times the real weight.
"""
if weights is None:
setattr(self, 'WT', pd.DataFrame({'nothing': []}))
return
if isinstance(weights, pd.DataFrame):
WT = weights
elif isinstance(weights, six.string_types):
weights_path = os.path.join(Records.CUR_PATH, weights)
if os.path.isfile(weights_path):
WT = pd.read_csv(weights_path)
else:
# cannot call read_egg_ function in unit tests
WT = read_egg_csv(
os.path.basename(weights_path)) # pragma: no cover
else:
msg = 'weights is not None or a string or a Pandas DataFrame'
raise ValueError(msg)
assert isinstance(WT, pd.DataFrame)
setattr(self, 'WT', WT.astype(np.int32))
del WT
def _read_ratios(self, ratios):
"""
Read Records adjustment ratios from file or
create empty DataFrame if None
"""
if ratios is None:
setattr(self, 'ADJ', pd.DataFrame({'nothing': []}))
return
if isinstance(ratios, six.string_types):
ratios_path = os.path.join(Records.CUR_PATH, ratios)
if os.path.isfile(ratios_path):
ADJ = pd.read_csv(ratios_path,
index_col=0)
else:
# cannot call read_egg_ function in unit tests
ADJ = read_egg_csv(os.path.basename(ratios_path),
index_col=0) # pragma: no cover
else:
msg = 'ratios is neither None nor a string'
raise ValueError(msg)
assert isinstance(ADJ, pd.DataFrame)
ADJ = ADJ.transpose()
if ADJ.index.name != 'agi_bin':
ADJ.index.name = 'agi_bin'
self.ADJ = pd.DataFrame()
setattr(self, 'ADJ', ADJ.astype(np.float32))
del ADJ
def _read_benefits(self, benefits):
"""
Read Records extrapolated benefits from a file or uses a specified
DataFrame or creates an empty DataFrame if None. Should only be
used with the cps.csv file
"""
if benefits is None:
setattr(self, 'BEN', | pd.DataFrame({'Nothing': []}) | pandas.DataFrame |
from flask import current_app
import numpy as np
import json
import re
import pandas as pd
from .utils import hash_data
class Serializer(object):
def __init__(self, schema, add_all):
""" Serialize and annotate results using a schema.
Args:
schema - json schema file
add_all - serialize features that are not in the schema
"""
self.schema = json.load(open(schema, 'r'))
self.add_all = add_all
class PredictorSerializer(Serializer):
def __init__(self, add_all=True, include=None, exclude=None, TR=None):
""" Initalize serializer for ingested features.
Args:
add_all - Add all variables including those not in the schema
include - List of variables to include
exclude - List of variables to exclude
TR - TR in seconds
"""
self.include = include
self.exclude = exclude
self.TR = TR
super().__init__(current_app.config['PREDICTOR_SCHEMA'], add_all)
def load(self, variable):
"""" Load and annotate a BIDSVariable
Args:
res - BIDSVariableCollection object
Returns a dictionary of annotated features
"""
if self.include is not None and variable.name not in self.include:
return None
if self.exclude is not None and variable.name in self.exclude:
return None
annotated = {}
annotated['original_name'] = variable.name
annotated['source'] = variable.source
for pattern, attr in self.schema.items():
if re.compile(pattern).match(variable.name):
annotated['name'] = re.sub(
pattern, attr.pop('name'), variable.name) \
if 'name' in attr else variable.name
annotated['description'] = re.sub(
pattern, attr.pop('description'), variable.name) \
if 'description' in attr else None
annotated.update(**attr) # Add any additional attributes
break
else:
annotated['name'] = variable.name
if self.add_all is False:
return None
# If SparseVariable
if hasattr(variable, 'onset'):
onsets = variable.onset.tolist()
durations = variable.duration.tolist()
values = variable.values.values.tolist()
# If Dense, resample, and sparsify
else:
TR = variable.sampling_rate / 2 if self.TR is None else self.TR
variable = variable.resample(1 / TR)
            onsets = np.arange(
                0, len(variable.values) * TR, TR).tolist()
            durations = [TR] * len(variable.values)
values = variable.values[variable.name].values.tolist()
events = []
for i, onset in enumerate(onsets):
events.append(
{
'onset': onset,
'duration': durations[i],
'value': values[i]
}
)
return annotated, events
stim_map = {
'ImageStim': 'image',
'VideoStim': 'video',
'TextStim': 'text',
'ComplexTextStim': 'text',
'AudioStim': 'audio'
}
class FeatureSerializer(Serializer):
def __init__(self, add_all=True):
super().__init__(current_app.config['FEATURE_SCHEMA'], add_all)
def _annotate_feature(self, pattern, schema, feat, ext_hash, sub_df,
default_active=True):
""" Annotate a single pliers extracted result
Args:
pattern - regex pattern to match feature name
schema - sub-schema that matches feature name
feat - feature name from pliers
ext_hash - hash of the extractor
            sub_df - dataframe of extracted values for this feature
default_active - set to active by default?
"""
name = re.sub(pattern, schema['name'], feat) \
if 'name' in schema else feat
description = re.sub(pattern, schema['description'], feat) \
if 'description' in schema else None
annotated = []
for i, v in sub_df[sub_df.value.notnull()].iterrows():
annotated.append(
(
{
'value': v['value'],
'onset': v['onset']
if not pd.isnull(v['onset']) else None,
'duration': v['duration']
if not | pd.isnull(v['duration']) | pandas.isnull |
import requests
import re
import pandas as pd
import json
def get_webtoon_genre_list():
url = "https://webtoon.p.rapidapi.com/originals/genres/list"
querystring = {"language":"en"}
headers = {
'x-rapidapi-host': "webtoon.p.rapidapi.com",
'x-rapidapi-key': "200898dbd8msh7effe9f4aca8119p1f02a4jsn9f53b70ac5e8"
}
response_gen = requests.request("GET", url, headers=headers, params=querystring)
webtoon_gen_json = response_gen.json()
webtoon_json_gen_df = | pd.DataFrame(webtoon_gen_json['message']['result']['genreList']['genres']) | pandas.DataFrame |
"""This script optimizes hyperparameters for a deep neural network (with LSTM units) based predictive model for outcome-oriented predictive process monitoring.
Author: <NAME> [<EMAIL>]
"""
import time
import os
import shutil
import sys
from sys import argv
import pickle
import csv
from collections import defaultdict
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers import CuDNNLSTM, Dropout
from keras import regularizers
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from hyperopt import Trials, STATUS_OK, tpe, fmin, hp
import hyperopt
from hyperopt.pyll.base import scope
from hyperopt.pyll.stochastic import sample
from DatasetManager import DatasetManager
PARAMS_DIR = "val_results_lstm"
# create directory
if not os.path.exists(os.path.join(PARAMS_DIR)):
os.makedirs(os.path.join(PARAMS_DIR))
def create_and_evaluate_model(params):
global trial_nr, all_results
trial_nr += 1
print("Trial %s out of %s" % (trial_nr, n_iter))
model = Sequential()
model.add(CuDNNLSTM(int(params["lstmsize"]),
kernel_initializer='glorot_uniform',
return_sequences=(params['n_layers'] != 1),
kernel_regularizer=regularizers.l1_l2(params["l1"],params["l2"]),
recurrent_regularizer=regularizers.l1_l2(params["l1"],params["l2"]),
input_shape=(max_len, data_dim)))
model.add(Dropout(params["dropout"]))
for i in range(2, params['n_layers']+1):
return_sequences = (i != params['n_layers'])
model.add(CuDNNLSTM(params['lstmsize'],
kernel_initializer='glorot_uniform',
return_sequences=return_sequences,
kernel_regularizer=regularizers.l1_l2(params["l1"],params["l2"]),
recurrent_regularizer=regularizers.l1_l2(params["l1"],params["l2"])))
model.add(Dropout(params["dropout"]))
model.add(Dense(2, activation=activation, kernel_initializer='glorot_uniform'))
opt = Adam(lr=params["learning_rate"])
model.compile(loss='binary_crossentropy', optimizer=opt)
early_stopping = EarlyStopping(monitor='val_loss', patience=10)
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=100, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
# train the model, output generated text after each iteration
if dataset_name == "crm2":
history = model.fit_generator(dataset_manager.data_generator(dt_train, max_len, 2**params['batch_size']),
validation_data=dataset_manager.data_generator(dt_val, max_len, 2**params['batch_size']),
steps_per_epoch=int(np.ceil(len(dt_train)/2**params['batch_size'])),
validation_steps=int(np.ceil(len(dt_val)/2**params['batch_size'])),
callbacks=[early_stopping, lr_reducer],
epochs=nb_epoch, verbose=2)
else:
history = model.fit(X, y,
validation_data=(X_val, y_val),
callbacks=[early_stopping, lr_reducer],
batch_size=2**params['batch_size'], epochs=nb_epoch, verbose=2)
val_losses = [history.history['val_loss'][epoch] for epoch in range(len(history.history['loss']))]
del model
val_losses = val_losses[5:] # don't consider the first few epochs because it may be very volatile
best_epoch = np.argmin(val_losses)
# save current trial results
for k, v in params.items():
for epoch in range(len(history.history['loss'])):
all_results.append((trial_nr, k, v, -1, epoch, history.history['val_loss'][epoch]))
return {'loss': val_losses[best_epoch], 'status': STATUS_OK, 'best_epoch': best_epoch+5}
dataset_name = argv[1]
method_name = argv[2]
cls_method = argv[3]
n_iter = int(argv[4])
activation = "sigmoid"
nb_epoch = 100
train_ratio = 0.8
val_ratio = 0.2
random_state = 22
dataset_manager = DatasetManager(dataset_name)
data = dataset_manager.read_dataset()
train, _ = dataset_manager.split_data_strict(data, train_ratio)
train, val = dataset_manager.split_val(train, val_ratio)
if "traffic_fines" in dataset_name:
max_len = 10
elif "bpic2017" in dataset_name:
max_len = min(20, dataset_manager.get_pos_case_length_quantile(data, 0.90))
else:
max_len = min(40, dataset_manager.get_pos_case_length_quantile(data, 0.90))
del data
dt_train = dataset_manager.encode_data_for_lstm(train)
print("Encoded train")
del train
dt_train = dt_train.sort_values(dataset_manager.timestamp_col, ascending=True,
kind="mergesort").groupby(dataset_manager.case_id_col).head(max_len)
data_dim = dt_train.shape[1] - 3
if dataset_name != "crm2": # for crm2 we use data generator instead because of high memory usage
X, y = dataset_manager.generate_3d_data(dt_train, max_len)
del dt_train
dt_val = dataset_manager.encode_data_for_lstm(val)
del val
dt_val = dt_val.sort_values(dataset_manager.timestamp_col, ascending=True,
kind="mergesort").groupby(dataset_manager.case_id_col).head(max_len)
if dataset_name != "crm2":
X_val, y_val = dataset_manager.generate_3d_data(dt_val, max_len)
del dt_val
space = {'lstmsize': scope.int(hp.qloguniform('lstmsize', np.log(10), np.log(150), 1)),
'dropout': hp.uniform("dropout", 0, 0.3),
'n_layers': scope.int(hp.quniform('n_layers', 1, 3, 1)),
'batch_size': scope.int(hp.quniform('batch_size', 3, 6, 1)),
'learning_rate': hp.loguniform("learning_rate", np.log(0.000001), np.log(0.0001)),
'l1': hp.loguniform("l1", np.log(0.00001), np.log(0.1)),
'l2': hp.loguniform("l2", np.log(0.00001), np.log(0.1))}
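# Illustrative sanity check (not part of the original experiment): one random draw
# from the search space can be inspected before starting the optimization, e.g.
#   sample(space)  # -> {'lstmsize': 42, 'dropout': 0.12, 'n_layers': 2, 'batch_size': 4, ...}
# Note that 'batch_size' is an exponent: the batch size actually used is 2 ** batch_size.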
# optimize parameters
trial_nr = 0
trials = Trials()
all_results = []
best = fmin(create_and_evaluate_model, space, algo=tpe.suggest, max_evals=n_iter, trials=trials)
# extract the best parameters
best_params = hyperopt.space_eval(space, best)
best_trial_nr = np.argmin([trial['loss'] for trial in trials.results])
best_params['nb_epoch'] = trials.results[best_trial_nr]['best_epoch']
# write to file
outfile = os.path.join(PARAMS_DIR, "optimal_params_%s_%s_%s.pickle" % (cls_method, dataset_name, method_name))
with open(outfile, "wb") as fout:
pickle.dump(best_params, fout)
dt_results = | pd.DataFrame(all_results, columns=["iter", "param", "value", "nr_events", "epoch", "score"]) | pandas.DataFrame |
# Manually load w2v
# import os
# from nlpia.data.loaders import BIGDATA_PATH
# from gensim.models import KeyedVectors
# path = os.path.join(BIGDATA_PATH, 'GoogleNews-vectors-negative300.bin.gz')
# wv = KeyedVectors.load_word2vec_format(path, binary=True)
# nlpia can now automatically download and load w2v
from nlpia.data.loaders import get_data
# from gensim.models import KeyedVectors
wv = get_data('word2vec')
# wv = KeyedVectors.load_word2vec_format(path, binary=True)
len(wv.vocab)
# 3000000
wv.vectors.shape
# (3000000, 300)
import pandas as pd # noqa
vocab = pd.Series(wv.vocab)
vocab.iloc[100000:100006] # different words for new KeyedVector format
# Illington_Fund Vocab(count:447860, index:2552140)
# Illingworth Vocab(count:2905166, index:94834)
# Illingworth_Halifax Vocab(count:1984281, index:1015719)
# Illini Vocab(count:2984391, index:15609)
# IlliniBoard.com Vocab(count:1481047, index:1518953)
# Illini_Bluffs Vocab(count:2636947, index:363053)
import numpy as np # noqa
np.linalg.norm(wv['Illinois'] - wv['Illini']) # <1>
# 3.3653798
similarity = np.dot(wv['Illinois'], wv['Illini']) / (
np.linalg.norm(wv['Illinois']) * np.linalg.norm(wv['Illini'])) # <2>
similarity
# 0.5501352
1 - similarity # <3>
# 0.4498648
# ----
# <1> Euclidean distance
# <2> Cosine similarity
# <3> Cosine distance
wv['Illini']
# array([ 0.15625 , 0.18652344, 0.33203125, 0.55859375, 0.03637695,
# -0.09375 , -0.05029297, 0.16796875, -0.0625 , 0.09912109,
# -0.0291748 , 0.39257812, 0.05395508, 0.35351562, -0.02270508,
from nlpia.data.loaders import get_data # noqa
cities = get_data('cities')
cities.head(1).T
# geonameid 3039154
# name <NAME>
# asciiname <NAME>
# alternatenames <NAME>,<NAME>
# latitude 42.5795
# longitude 1.65362
# feature_class P
# feature_code PPL
# country_code AD
# cc2 NaN
# admin1_code 02
# admin2_code NaN
# admin3_code NaN
# admin4_code NaN
# population 1052
# elevation NaN
# dem 1721
# timezone Europe/Andorra
# modification_date 2012-11-03
us = cities[(cities.country_code == 'US') & (cities.admin1_code.notnull())].copy()
states = | pd.read_csv('http://www.fonz.net/blog/wp-content/uploads/2008/04/states.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure changes propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
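# _panel is the shared fixture reused by the test classes below: tm.makePanel()
# builds a Panel with items ItemA-ItemC, a business-day DatetimeIndex as the
# major axis and columns A-D as the minor axis, and tm.add_nans() then inserts
# NaNs into each item so the missing-data paths are exercised.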
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when passing both major and major_axis
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
self.assert_panel_equal(expected, result)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.ix[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.ix[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.ix['b'] = np.nan
result = p.dropna(how='all')
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
com.pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
self.assertRaises(ValueError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
assert_panel_equal(dropped, panel)
dropped = panel.drop(['Two', 'Three'], errors='ignore')
expected = Panel({"One": df})
assert_panel_equal(dropped, expected)
# Major
exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(0, 1, ['major_axis', 'major'], expected)
exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop([1], 1, ['major_axis', 'major'], expected)
# Minor
exp_df = df[['B']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
exp_df = df[['A']]
expected = | Panel({"One": exp_df, "Two": exp_df}) | pandas.core.panel.Panel |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators_Original.ipynb (unless otherwise specified).
__all__ = ['racdiv', 'pasi', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hh40inc', 'hh60inc', 'hh75inc',
'hhchpov', 'hhm75', 'hhpov', 'hhs', 'hsdipl', 'lesshs', 'male', 'nilf', 'othrcom', 'p2more', 'pubtran',
'age5', 'age24', 'age64', 'age18', 'age65', 'affordm', 'affordr', 'bahigher', 'carpool', 'drvalone',
'hh25inc', 'mhhi', 'nohhint', 'novhcl', 'paa', 'ppac', 'phisp', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav29', 'trav45', 'trav44', 'unempl', 'unempr', 'walked']
# Cell
#File: racdiv.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B02001 - Race
# Universe: Total Population
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def racdiv( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B02001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df_hisp = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
df_hisp = df_hisp.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df_hisp = df_hisp.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino'] = df_hisp['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['African-American%'] = df[ 'B02001_003E_Total_Black_or_African_American_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['White%'] = df[ 'B02001_002E_Total_White_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['American Indian%'] = df[ 'B02001_004E_Total_American_Indian_and_Alaska_Native_alone' ]/ df[ 'B02001_001E_Total' ] * 100
df1['Asian%'] = df[ 'B02001_005E_Total_Asian_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['Native Hawaii/Pac Islander%'] = df[ 'B02001_006E_Total_Native_Hawaiian_and_Other_Pacific_Islander_alone'] / df[ 'B02001_001E_Total' ] * 100
df1['Hisp %'] = df['B03002_012E_Total_Hispanic_or_Latino'] / df[ 'B02001_001E_Total' ] * 100
# =1-(POWER(%AA/100,2)+POWER(%White/100,2)+POWER(%AmerInd/100,2)+POWER(%Asian/100,2) + POWER(%NativeAm/100,2))*(POWER(%Hispanic/100,2) + POWER(1-(%Hispanic/100),2))
df1['Diversity_index'] = ( 1- (
( df1['African-American%'] /100 )**2
+( df1['White%'] /100 )**2
+( df1['American Indian%'] /100 )**2
+( df1['Asian%'] /100 )**2
+( df1['Native Hawaii/Pac Islander%'] /100 )**2
)*(
( df1['Hisp %'] /100 )**2
+(1-( df1['Hisp %'] /100) )**2
) ) * 100
return df1['Diversity_index']
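# Worked example of the formula above with hypothetical shares (not ACS data):
# race shares of 60%, 30% and 5% plus a 10% Hispanic share give
# (1 - (0.6**2 + 0.3**2 + 0.05**2) * (0.1**2 + 0.9**2)) * 100, which is about 62.9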
# Cell
#File: pasi.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def pasi( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# The Hispanic/Latino column already exists in this table; the line below only checks that it is present
df['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['Asian%NH'] = df[ 'B03002_006E_Total_Not_Hispanic_or_Latino_Asian_alone' ]/ tot * 100
return df1['Asian%NH']
# Cell
#File: elheat.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator
#input: Year
#output:
import pandas as pd
import glob
def elheat( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_004E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
import logging
import pandas as pd
import kleat.misc.settings as S
from kleat.misc.utils import timeit
logging.basicConfig(
level=logging.DEBUG, format='%(asctime)s|%(levelname)s|%(message)s')
def gen_outfile(infile):
base = '.'.join(os.path.basename(infile).split('.')[:-1])
return os.path.join(os.path.dirname(infile),
'{0}.ml_ready.pkl'.format(base))
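# Illustrative behaviour with a made-up path:
# gen_outfile('/data/clv/sample.csv') returns '/data/clv/sample.ml_ready.pkl'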
def convert_to_ml_ready_df(adf):
adf['abs_dist_to_aclv'] = adf['signed_dist_to_aclv'].abs()
used_hexamers = [_[0] for _ in S.CANDIDATE_HEXAMERS] + ['NA']
ctg_dum_hxm = | pd.get_dummies(adf.ctg_hex, columns=used_hexamers) | pandas.get_dummies |
# Script to retrieve betting odds for NFL games via the odds-api
# Docs here: https://the-odds-api.com/liveapi/guides/v4/#overview
import pandas as pd
import requests
import os
import json
import numpy as np
import datetime as dt
api_key = "b13e7359d6aa2d094b2b3afe2e31c110"
url = f"https://api.the-odds-api.com/v4/sports?apiKey={api_key}"
nfl_url = f"https://api.the-odds-api.com/v4/sports/americanfootball_nfl/odds/?regions=us&oddsFormat=american&apiKey={api_key}"
# ##### Saving our API Calls
#r = requests.get(nfl_url)
#
###Writing response to csv to preserve API calls
#for i, v in enumerate(r.json()):
# df = pd.DataFrame(v)
# if len(df)> 1:
# df.to_csv(f'my_odds/odds-{i}.csv')
#################################################
def getBestOdds(df):
'''
Pulls best odds from Odds-API response
parameters:
df - pandas.DataFrame object; contains matchup data for a given NFL week
'''
print('DATAFRAME:', df)
away_team = df['away_team'].iloc[1]
home_team = df['home_team'].iloc[1]
print(f'Selecting best odds for matchup: {away_team} @ {home_team}')
print(df.head())
odds = {away_team: [], home_team: [], 'oddsmaker':[]}
# Stripping API response for odds for the matchup
for p in df['bookmakers']:
d = json.loads(p.replace("\'", "\""))
# print(d['title'])
odds['oddsmaker'].append(d['title'])
for team in d['markets'][0]['outcomes']:
if team['name']==away_team:
away_odds = team['price']
odds[away_team].append(away_odds)
elif team['name']==home_team:
home_odds = team['price']
odds[home_team].append(home_odds)
print(f'Away odds {away_team}: {away_odds}')
print(f'Home odds {home_team}: {home_odds}')
df = | pd.DataFrame(odds) | pandas.DataFrame |
import contextlib
import numpy as np
import pandas as pd
import warnings
from xray import conventions, Variable, Dataset
from xray.core import utils, indexing
from . import TestCase, requires_netCDF4, unittest
from .test_backends import CFEncodedDataTest
from xray.core.pycompat import iteritems
from xray.backends.memory import InMemoryDataStore
from xray.conventions import cf_encoder, cf_decoder
class TestMaskedAndScaledArray(TestCase):
def test(self):
x = conventions.MaskedAndScaledArray(np.arange(3), fill_value=0)
self.assertEqual(x.dtype, np.dtype('float'))
self.assertEqual(x.shape, (3,))
self.assertEqual(x.size, 3)
self.assertEqual(x.ndim, 1)
self.assertEqual(len(x), 3)
self.assertArrayEqual([np.nan, 1, 2], x)
x = conventions.MaskedAndScaledArray(np.arange(3), add_offset=1)
self.assertArrayEqual(np.arange(3) + 1, x)
x = conventions.MaskedAndScaledArray(np.arange(3), scale_factor=2)
self.assertArrayEqual(2 * np.arange(3), x)
x = conventions.MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]),
-99, 0.01, 1)
expected = np.array([np.nan, 0.99, 1, 1.01, 1.02])
self.assertArrayEqual(expected, x)
def test_0d(self):
x = conventions.MaskedAndScaledArray(np.array(0), fill_value=0)
self.assertTrue(np.isnan(x))
self.assertTrue(np.isnan(x[...]))
x = conventions.MaskedAndScaledArray(np.array(0), fill_value=10)
self.assertEqual(0, x[...])
class TestCharToStringArray(TestCase):
def test_wrapper_class(self):
array = np.array(list('abc'), dtype='S')
actual = conventions.CharToStringArray(array)
expected = np.array('abc', dtype='S')
self.assertEqual(actual.dtype, expected.dtype)
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(actual.size, expected.size)
self.assertEqual(actual.ndim, expected.ndim)
with self.assertRaises(TypeError):
len(actual)
self.assertArrayEqual(expected, actual)
with self.assertRaises(IndexError):
actual[:2]
self.assertEqual(str(actual), 'abc')
array = np.array([list('abc'), list('cdf')], dtype='S')
actual = conventions.CharToStringArray(array)
expected = np.array(['abc', 'cdf'], dtype='S')
self.assertEqual(actual.dtype, expected.dtype)
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(actual.size, expected.size)
self.assertEqual(actual.ndim, expected.ndim)
self.assertEqual(len(actual), len(expected))
self.assertArrayEqual(expected, actual)
self.assertArrayEqual(expected[:1], actual[:1])
with self.assertRaises(IndexError):
actual[:, :2]
def test_char_to_string(self):
array = np.array([['a', 'b', 'c'], ['d', 'e', 'f']])
expected = np.array(['abc', 'def'])
actual = conventions.char_to_string(array)
self.assertArrayEqual(actual, expected)
expected = np.array(['ad', 'be', 'cf'])
actual = conventions.char_to_string(array.T) # non-contiguous
self.assertArrayEqual(actual, expected)
def test_string_to_char(self):
array = np.array([['ab', 'cd'], ['ef', 'gh']])
expected = np.array([[['a', 'b'], ['c', 'd']],
[['e', 'f'], ['g', 'h']]])
actual = conventions.string_to_char(array)
self.assertArrayEqual(actual, expected)
expected = np.array([[['a', 'b'], ['e', 'f']],
[['c', 'd'], ['g', 'h']]])
actual = conventions.string_to_char(array.T)
self.assertArrayEqual(actual, expected)
class TestDatetime(TestCase):
@requires_netCDF4
def test_cf_datetime(self):
import netCDF4 as nc4
for num_dates, units in [
(np.arange(100), 'days since 2000-01-01'),
(np.arange(100).reshape(10, 10), 'days since 2000-01-01'),
(12300 + np.arange(50), 'hours since 1680-01-01 00:00:00'),
(10, 'days since 2000-01-01'),
([10], 'days since 2000-01-01'),
([[10]], 'days since 2000-01-01'),
([10, 10], 'days since 2000-01-01'),
(0, 'days since 1000-01-01'),
([0], 'days since 1000-01-01'),
([[0]], 'days since 1000-01-01'),
(np.arange(20), 'days since 1000-01-01'),
(np.arange(0, 100000, 10000), 'days since 1900-01-01'),
]:
for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:
expected = nc4.num2date(num_dates, units, calendar)
print(num_dates, units, calendar)
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = conventions.decode_cf_datetime(num_dates, units,
calendar)
if (isinstance(actual, np.ndarray)
and np.issubdtype(actual.dtype, np.datetime64)):
# self.assertEqual(actual.dtype.kind, 'M')
# For some reason, numpy 1.8 does not compare ns precision
# datetime64 arrays as equal to arrays of datetime objects,
# but it works for us precision. Thus, convert to us
# precision for the actual array equal comparison...
actual_cmp = actual.astype('M8[us]')
else:
actual_cmp = actual
self.assertArrayEqual(expected, actual_cmp)
encoded, _, _ = conventions.encode_cf_datetime(actual, units,
calendar)
self.assertArrayEqual(num_dates, np.around(encoded))
if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1
and '1000' not in units):
# verify that wrapping with a pandas.Index works
# note that it *does not* currently work to even put
# non-datetime64 compatible dates into a pandas.Index :(
encoded, _, _ = conventions.encode_cf_datetime(
pd.Index(actual), units, calendar)
self.assertArrayEqual(num_dates, np.around(encoded))
def test_decoded_cf_datetime_array(self):
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual, expected)
# default calendar
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01')
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual, expected)
def test_slice_decoded_cf_datetime_array(self):
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual[slice(0, 2)], expected[slice(0, 2)])
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual[[0, 2]], expected[[0, 2]])
def test_decode_cf_datetime_non_standard_units(self):
expected = pd.date_range(periods=100, start='1970-01-01', freq='h')
# netCDFs from madis.noaa.gov use this format for their time units
# they cannot be parsed by netcdftime, but pd.Timestamp works
units = 'hours since 1-1-1970'
actual = conventions.decode_cf_datetime(np.arange(100), units)
self.assertArrayEqual(actual, expected)
@requires_netCDF4
def test_decode_non_standard_calendar(self):
import netCDF4 as nc4
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23',
freq='H')
noleap_time = nc4.date2num(times.to_pydatetime(), units,
calendar=calendar)
expected = times.values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = conventions.decode_cf_datetime(noleap_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
self.assertArrayEqual(actual, expected)
@requires_netCDF4
def test_decode_non_standard_calendar_single_element(self):
units = 'days since 0001-01-01'
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = conventions.decode_cf_datetime(num_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
@requires_netCDF4
def test_decode_non_standard_calendar_single_element_fallback(self):
import netCDF4 as nc4
units = 'days since 0001-01-01'
dt = nc4.netcdftime.datetime(2001, 2, 29)
for calendar in ['360_day', 'all_leap', '366_day']:
num_time = nc4.date2num(dt, units, calendar)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
actual = conventions.decode_cf_datetime(num_time, units,
calendar=calendar)
self.assertEqual(len(w), 1)
self.assertIn('Unable to decode time axis',
str(w[0].message))
expected = np.asarray(nc4.num2date(num_time, units, calendar))
print(num_time, calendar, actual, expected)
self.assertEqual(actual.dtype, np.dtype('O'))
self.assertEqual(expected, actual)
@requires_netCDF4
def test_decode_non_standard_calendar_multidim_time(self):
import netCDF4 as nc4
calendar = 'noleap'
units = 'days since 0001-01-01'
times1 = | pd.date_range('2001-04-01', end='2001-04-05', freq='D') | pandas.date_range |
import pandas as pd
FCST_STOCKROW_CAT_NO_BALANCE_A_INDEX_str = ["2019-12-31 00:00:00", "2020-12-31 00:00:00"]
FCST_STOCKROW_CAT_NO_BALANCE_A_INDEX = [ | pd.to_datetime(val) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# !pip install requests
# !pip install pulp
# In[ ]:
import pandas as pd
import numpy as np
import requests as req
import re as regex
from time import sleep
# In[ ]:
# Source: https://docs.pokemontcg.io/#documentationrate_limits
###################################################################################################
# Code Name Description
###################################################################################################
# 400 Bad Request We could not process that action
# 403 Forbidden You exceeded the rate limit
# 404 Not Found The requested resource could not be found
# 500 Internal Server Error We had a problem with our server. Please try again later
# 503 Service Unavailable We are temporarily offline for maintenance. Please try again later
###################################################################################################
dict_error_code = {
200: 'Success !',
400: 'Bad Request.',
403: 'Forbidden !',
404: 'Not Found.',
500: 'Internal Server Error !',
503: 'Service Unavailable !'
}
# In[ ]:
# Define the API URL and the parameters/conditions to be included in it
url = "https://api.pokemontcg.io/v2/sets"
# Call the Pokemon TCG API
response = req.get(url)
# Report the request result
print(f' Result : ' + dict_error_code[response.status_code])
# data for each card set
sets = response.json()['data']
# List with the IDs of each set
set_names = [sets[i]['id'] for i in np.arange(len(sets))]
# In[ ]:
# Loop to fetch Pokemon data for each set
data = []
for s in set_names:
url = "https://api.pokemontcg.io/v2/cards"
params = 'q=supertype:pokemon set.id:' + s
# Call the Pokemon TCG API
response = req.get(url,params=params)
# Convert the response to JSON and count how many records it returned
response_json = response.json()
dataset = response_json['data']
data.extend(dataset)
# Report the request result
print(f' Set {s} ( {len(response_json["data"])} rows ): {dict_error_code[response.status_code]}')
sleep(0.2)
# In[ ]:
print(f'Type of the API response: {type(response_json)}')
print(f'Type of the element inside the response ("data"): {type(data)}')
print(f'Type of one element of the data list: {type(data[0])}')
# In[ ]:
# The first Pokemon of the first Pokemon card set (Base Set)
data[0]['name']
# Showing which card the one above is
# imgurl = data[0]['images']['large']
# resp = req.get(imgurl)
# img = Image.open(BytesIO(resp.content))
# img.show()
# In[ ]:
# Key fields inside the dictionary
data[0].keys()
# In[ ]:
df = pd.DataFrame(data=data)
# Summary of the dataframe columns
df.info()
# # Helper functions for processing the datasets
# In[ ]:
# Check whether the card is legal in the Standard format
def is_standard_format(x):
if 'standard' in x:
return 1
else:
return 0
# In[ ]:
# Determine the card's foil type, checking from the cheapest to the most expensive
def get_card_foil_type(x):
foil_list = ['normal','holofoil','reverseHolofoil','1stEditionHolofoil']
foil_valid = ''
for foil in foil_list:
if foil in x['prices']:
foil_valid += foil
break
return foil_valid
# In[ ]:
# Get the card's (default) market price based on its rarity/"foil"
def get_card_price(x,pricing = 'mid'):
try:
foil = get_card_foil_type(x)
return x['prices'][foil][pricing]
except:
return 0
# In[ ]:
# Function to extract the array/map elements inside each column
def get_dict_elements(df,column_name,key,pos=0):
df_e = pd.DataFrame(df[column_name].fillna('').values.tolist(),index=df.index)
e = [] # collect the values in this list
if type(df[column_name][0]) == list:
for i in np.arange(len(df_e)):
try :
e.append(df_e[pos][i][key])
except TypeError:
if key == 'damage' or key == 'convertedEnergyCost':
e.append('0')
else:
e.append('')
return pd.Series(e,name=key+str(pos)).fillna('')
elif type(df[column_name][0]) == dict:
return df_e[key]
# In[ ]:
def regex_findsymbol(x,symbol):
pattern = '.*[' + symbol + ']'
# returns True if the expression matched and False otherwise
return regex.match(pattern=pattern,string=x) != None
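# For illustration (made-up strings): regex_findsymbol('30+', '+') returns True,
# while regex_findsymbol('30', '+') returns False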
# In[ ]:
# The regex captures the second group of the pattern below, which holds only the damage values
def regex_get_added_damage(x):
# Grab the damage value between the words "does" and "damage"
r = regex.search(pattern='(does|plus)\s([0-9]*)\s(more|damage).(?!to\sitself)',string=x)
if r == None:
return '0'
else:
return r.group(2)
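# For illustration (hypothetical attack texts, not from the API):
# regex_get_added_damage('Flip a coin. If heads, this attack does 30 more damage.') returns '30'
# regex_get_added_damage('Draw a card.') returns '0'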
# In[ ]:
def get_dmg_output(df,
cols = ['damage0','damage1','damage2','damage3'],
cols_add = ['dmg0plus','dmg1plus','dmg2plus','dmg3plus'],
cols_total = ['totaldmg0','totaldmg1','totaldmg2','totaldmg3']):
dict_sum_dmg = {}
for col, col_add , col_total in zip(cols,cols_add,cols_total):
sum_dmg = []
for i in np.arange(len(df)):
if regex_findsymbol(df[col][i],'+') == True:
d1 = int(df[col][i].replace('+',''))
d2 = int(df[col_add][i])
elif regex_findsymbol(df[col][i],'×') == True:
d1 = int(df[col][i].replace('×',''))*1
d2 = 0
elif regex_findsymbol(df[col][i],'-') == True:
d1 = int(df[col][i].replace('-',''))
d2 = 0
else:
d1 = int(df[col][i])
d2 = int(df[col_add][i])
d = d1 + d2
sum_dmg.append(d)
dict_sum_dmg[col_total] = sum_dmg
return pd.DataFrame(dict_sum_dmg)
# In[ ]:
def DivideColumns(x,y):
return np.where(y==0,0,x//y)
# In[ ]:
def find_prize_cards(x):
pattern = 'your\sopponent\stakes\s(\d*)\sPrize\scards'
r = regex.search(pattern=pattern,string=x)
if r == None:
return 1
else:
return r.group(1)
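# For illustration (hypothetical card text):
# find_prize_cards('When this Pokemon is Knocked Out, your opponent takes 2 Prize cards.') returns '2'
# Note the no-match fallback is the integer 1, while matches return the captured string.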
# # Testing the functions defined above
# In[ ]:
id=0
print(f' The card {df.name[id]} has foil type {get_card_foil_type(df.tcgplayer[id])}!')
print(f' The card {df.name[id]} costs {get_card_price(df.tcgplayer[id])} dollars!')
# In[ ]:
get_dict_elements(df,'attacks','text')
# # Preparing the damage ABT (analytical base table)
# In[ ]:
# Series with the damage of each attack
dmg0 = get_dict_elements(df,'attacks','damage')
dmg1 = get_dict_elements(df,'attacks','damage',pos=1)
dmg2 = get_dict_elements(df,'attacks','damage',pos=2)
dmg3 = get_dict_elements(df,'attacks','damage',pos=3)
# Series with the text of each attack
text0 = get_dict_elements(df,'attacks','text')
text1 = get_dict_elements(df,'attacks','text',pos=1)
text2 = get_dict_elements(df,'attacks','text',pos=2)
text3 = get_dict_elements(df,'attacks','text',pos=3)
# Series with the additional damage of each attack parsed from the text
dmg0_plus = text0.apply(lambda row: regex_get_added_damage(row)).rename('dmg0plus')
dmg1_plus = text1.apply(lambda row: regex_get_added_damage(row)).rename('dmg1plus')
dmg2_plus = text2.apply(lambda row: regex_get_added_damage(row)).rename('dmg2plus')
dmg3_plus = text3.apply(lambda row: regex_get_added_damage(row)).rename('dmg3plus')
# Series with the energy cost of each attack
cost0 = get_dict_elements(df,'attacks','convertedEnergyCost')
cost1 = get_dict_elements(df,'attacks','convertedEnergyCost',pos=1)
cost2 = get_dict_elements(df,'attacks','convertedEnergyCost',pos=2)
cost3 = get_dict_elements(df,'attacks','convertedEnergyCost',pos=3)
# In[ ]:
df_atk = pd.concat([text0, dmg0, dmg0_plus,
text1, dmg1, dmg1_plus,
text2, dmg2, dmg2_plus,
text3, dmg3, dmg3_plus],axis=1)
df_atk_energycost = | pd.concat([cost0,cost1,cost2,cost3],axis=1) | pandas.concat |
"""
Created on Sat Nov 9 13:34:04 2019
generate 1-D coloumbic matrices
@author: user
"""
from rdkit.Chem import AllChem as Chem
from collections import Counter
import pandas as pd
import numpy as np
from openbabel import pybel
open_babel = True # if false, use rdkit
omit_repetition = False # omit repeated values in matrix
| pd.set_option('display.width', 150) | pandas.set_option |
from IPython.display import display
import pandas as pd
import pyomo.environ as pe
import numpy as np
import csv
import os
import shutil
class inosys:
def __init__(self, inp_folder, ref_bus, dshed_cost = 1000000, rshed_cost = 500, phase = 3, vmin=0.85, vmax=1.15, sbase = 1, sc_fa = 1):
'''
Initialise the investment and operation problem.
:param str inp_folder: The input directory for the data. It expects to find several CSV files detailing the system input data (Default current folder)
:param float dshed_cost: Demand Shedding Price (Default 1000000)
:param float rshed_cost: Renewable Shedding Price (Default 500)
:param int phase: Number of Phases (Default 3)
:param float vmin: Minimum node voltage (Default 0.85)
:param float vmax: Maximum node voltage (Default 1.15)
:param float sbase: Base Apparent Power (Default 1 kW)
:param int ref_bus: Reference node
:param float sc_fa: Scaling Factor (Default 1)
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.inosys("wat_inv", ref_bus = 260)
'''
self.cgen = pd.read_csv(inp_folder + os.sep + 'cgen_dist.csv')
self.egen = pd.read_csv(inp_folder + os.sep + 'egen_dist.csv')
self.csol = pd.read_csv(inp_folder + os.sep + 'csol_dist.csv')
self.esol = pd.read_csv(inp_folder + os.sep + 'esol_dist.csv')
self.cwin = pd.read_csv(inp_folder + os.sep + 'cwin_dist.csv')
self.ewin = | pd.read_csv(inp_folder + os.sep + 'ewin_dist.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
import json
import base64
import datetime
import requests
import pathlib
import math
import pandas as pd
import flask
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from plotly import tools
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
server = app.server
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
# Loading historical tick data
currency_pair_data = {
"EURUSD": pd.read_csv(
DATA_PATH.joinpath("EURUSD.csv"), index_col=1, parse_dates=["Date"]
),
"USDJPY": pd.read_csv(
DATA_PATH.joinpath("USDJPY.csv"), index_col=1, parse_dates=["Date"]
),
"GBPUSD": pd.read_csv(
DATA_PATH.joinpath("GBPUSD.csv"), index_col=1, parse_dates=["Date"]
),
"USDCHF": pd.read_csv(
DATA_PATH.joinpath("USDCHF.csv"), index_col=1, parse_dates=["Date"]
),
}
# Currency pairs
currencies = ["EURUSD", "USDCHF", "USDJPY", "GBPUSD"]
# API Requests for news div
news_requests = requests.get(
"https://newsapi.org/v2/top-headlines?sources=bbc-news&apiKey=da8e2e705b914f9f86ed2e9692e66012"
)
# API Call to update news
def update_news():
json_data = news_requests.json()["articles"]
df = pd.DataFrame(json_data)
df = pd.DataFrame(df[["title", "url"]])
max_rows = 10
return html.Div(
children=[
html.P(className="p-news", children="Headlines"),
html.P(
className="p-news float-right",
children="Last update : "
+ datetime.datetime.now().strftime("%H:%M:%S"),
),
html.Table(
className="table-news",
children=[
html.Tr(
children=[
html.Td(
children=[
html.A(
className="td-link",
children=df.iloc[i]["title"],
href=df.iloc[i]["url"],
target="_blank",
)
]
)
]
)
for i in range(min(len(df), max_rows))
],
),
]
)
# Returns dataset for currency pair with nearest datetime to current time
def first_ask_bid(currency_pair, t):
t = t.replace(year=2016, month=1, day=5)
items = currency_pair_data[currency_pair]
dates = items.index.to_pydatetime()
index = min(dates, key=lambda x: abs(x - t))
df_row = items.loc[index]
int_index = items.index.get_loc(index)
return [df_row, int_index] # returns dataset row and index of row
# Creates HTML Bid and Ask (Buy/Sell buttons)
def get_row(data):
index = data[1]
current_row = data[0]
return html.Div(
children=[
# Summary
html.Div(
id=current_row[0] + "summary",
className="row summary",
n_clicks=0,
children=[
html.Div(
id=current_row[0] + "row",
className="row",
children=[
html.P(
current_row[0], # currency pair name
id=current_row[0],
className="three-col",
),
html.P(
current_row[1].round(5), # Bid value
id=current_row[0] + "bid",
className="three-col",
),
html.P(
current_row[2].round(5), # Ask value
id=current_row[0] + "ask",
className="three-col",
),
html.Div(
index,
id=current_row[0]
+ "index", # we save index of row in hidden div
style={"display": "none"},
),
],
)
],
),
# Contents
html.Div(
id=current_row[0] + "contents",
className="row details",
children=[
# Button for buy/sell modal
html.Div(
className="button-buy-sell-chart",
children=[
html.Button(
id=current_row[0] + "Buy",
children="Buy/Sell",
n_clicks=0,
)
],
),
# Button to display currency pair chart
html.Div(
className="button-buy-sell-chart-right",
children=[
html.Button(
id=current_row[0] + "Button_chart",
children="Chart",
n_clicks=1
if current_row[0] in ["EURUSD", "USDCHF"]
else 0,
)
],
),
],
),
]
)
# color of Bid & Ask rates
def get_color(a, b):
if a == b:
return "white"
elif a > b:
return "#45df7e"
else:
return "#da5657"
# Replace ask_bid row for currency pair with colored values
def replace_row(currency_pair, index, bid, ask):
index = index + 1 # index of new data row
new_row = (
currency_pair_data[currency_pair].iloc[index]
if index != len(currency_pair_data[currency_pair])
else first_ask_bid(currency_pair, datetime.datetime.now())
) # if not the end of the dataset we retrieve next dataset row
return [
html.P(
currency_pair, id=currency_pair, className="three-col" # currency pair name
),
html.P(
new_row[1].round(5), # Bid value
id=new_row[0] + "bid",
className="three-col",
style={"color": get_color(new_row[1], bid)},
),
html.P(
new_row[2].round(5), # Ask value
className="three-col",
id=new_row[0] + "ask",
style={"color": get_color(new_row[2], ask)},
),
html.Div(
index, id=currency_pair + "index", style={"display": "none"}
), # save index in hidden div
]
# Display big numbers in readable format
def human_format(num):
try:
num = float(num)
# If value is 0
if num == 0:
return 0
# Else value is a number
if num < 1000000:
return num
magnitude = int(math.log(num, 1000))
mantissa = str(int(num / (1000 ** magnitude)))
return mantissa + ["", "K", "M", "G", "T", "P"][magnitude]
except:
return num
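# For example (made-up figures): human_format(50000) returns 50000.0 (below one million, no abbreviation),
# while human_format(1250000) returns '1M'; only values of one million and above are abbreviated.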
# Returns Top cell bar for header area
def get_top_bar_cell(cellTitle, cellValue):
return html.Div(
className="two-col",
children=[
html.P(className="p-top-bar", children=cellTitle),
html.P(id=cellTitle, className="display-none", children=cellValue),
html.P(children=human_format(cellValue)),
],
)
# Returns HTML Top Bar for app layout
def get_top_bar(
balance=50000, equity=50000, margin=0, fm=50000, m_level="%", open_pl=0
):
return [
get_top_bar_cell("Balance", balance),
get_top_bar_cell("Equity", equity),
get_top_bar_cell("Margin", margin),
get_top_bar_cell("Free Margin", fm),
get_top_bar_cell("Margin Level", m_level),
get_top_bar_cell("Open P/L", open_pl),
]
####### STUDIES TRACES ######
# Moving average
def moving_average_trace(df, fig):
df2 = df.rolling(window=5).mean()
trace = go.Scatter(
x=df2.index, y=df2["close"], mode="lines", showlegend=False, name="MA"
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Exponential moving average
def e_moving_average_trace(df, fig):
df2 = df.rolling(window=20).mean()
trace = go.Scatter(
x=df2.index, y=df2["close"], mode="lines", showlegend=False, name="EMA"
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Bollinger Bands
def bollinger_trace(df, fig, window_size=10, num_of_std=5):
price = df["close"]
rolling_mean = price.rolling(window=window_size).mean()
rolling_std = price.rolling(window=window_size).std()
upper_band = rolling_mean + (rolling_std * num_of_std)
lower_band = rolling_mean - (rolling_std * num_of_std)
trace = go.Scatter(
x=df.index, y=upper_band, mode="lines", showlegend=False, name="BB_upper"
)
trace2 = go.Scatter(
x=df.index, y=rolling_mean, mode="lines", showlegend=False, name="BB_mean"
)
trace3 = go.Scatter(
x=df.index, y=lower_band, mode="lines", showlegend=False, name="BB_lower"
)
fig.append_trace(trace, 1, 1) # plot in first row
fig.append_trace(trace2, 1, 1) # plot in first row
fig.append_trace(trace3, 1, 1) # plot in first row
return fig
# Accumulation Distribution
def accumulation_trace(df):
df["volume"] = ((df["close"] - df["low"]) - (df["high"] - df["close"])) / (
df["high"] - df["low"]
)
trace = go.Scatter(
x=df.index, y=df["volume"], mode="lines", showlegend=False, name="Accumulation"
)
return trace
# Commodity Channel Index
def cci_trace(df, ndays=5):
TP = (df["high"] + df["low"] + df["close"]) / 3
CCI = pd.Series(
(TP - TP.rolling(window=10, center=False).mean())
/ (0.015 * TP.rolling(window=10, center=False).std()),
name="cci",
)
trace = go.Scatter(x=df.index, y=CCI, mode="lines", showlegend=False, name="CCI")
return trace
# Price Rate of Change
def roc_trace(df, ndays=5):
N = df["close"].diff(ndays)
D = df["close"].shift(ndays)
ROC = pd.Series(N / D, name="roc")
trace = go.Scatter(x=df.index, y=ROC, mode="lines", showlegend=False, name="ROC")
return trace
# Stochastic oscillator %K
def stoc_trace(df):
SOk = pd.Series((df["close"] - df["low"]) / (df["high"] - df["low"]), name="SO%k")
trace = go.Scatter(x=df.index, y=SOk, mode="lines", showlegend=False, name="SO%k")
return trace
# Momentum
def mom_trace(df, n=5):
M = pd.Series(df["close"].diff(n), name="Momentum_" + str(n))
trace = go.Scatter(x=df.index, y=M, mode="lines", showlegend=False, name="MOM")
return trace
# Pivot points
def pp_trace(df, fig):
PP = pd.Series((df["high"] + df["low"] + df["close"]) / 3)
R1 = pd.Series(2 * PP - df["low"])
S1 = pd.Series(2 * PP - df["high"])
R2 = | pd.Series(PP + df["high"] - df["low"]) | pandas.Series |
import unittest
import itertools
import os
import pandas as pd
import platform
import numpy as np
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs, dist_IR_contains)
from hpat.hiframes.rolling import supported_rolling_funcs
LONG_TEST = (int(os.environ['HPAT_LONG_ROLLING_TEST']) != 0
if 'HPAT_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
class TestRolling(unittest.TestCase):
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = hpat.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = | pd.DataFrame({'B': [0, 1, 2, -2, 4]}) | pandas.DataFrame |
from tqdm import tqdm
tqdm.monitor_interval = 0
import requests
import json
import datetime
import pandas as pd
import numpy as np
from .SeqlDB import SeqlDB
from . import EtherScamHTMLParser as esParser
class ethToSql():
def __init__(self, EthHost, EthPort, dbConString):
self.rpc_port=EthPort
self.host= EthHost
self.delay=0.0001
self.url = "{}:{}".format(EthHost, EthPort)
self.seqldb = SeqlDB(dbConString)
#desired order for the contract dataframe
self.dfContractColOrder = ['blockNumber', 'transactionHash', 'contractAddress',
'creator', #instead of "from"
'gasUsed', 'transactionIndex', 'cumulativeGasUsed']
#desired order for the Transactions dataframe
self.dfTransColsOrder = ['blockNumber', 'hash', 'transactionIndex', 'from', 'to',
'contractCreated', #aux field: determines the address of the contract created on that transaction
'valEth', #aux field: value in Ether
'valFinney', #aux field: value in Finney
'valSzabo', #aux field: value in Szabo
'value', 'gas', 'gasPrice', 'nonce'
#, 'input', 'r', 's', 'v' #dont want these
]
#desired order for the Block dataframe
self.dfBlockColsOrder = ['number',
'transCount', #aux field: number of transactions on that block
'uniqueAccounts', #aux field: accounts on that block
'contractCount', #aux field: contracts created on the block
'hash', 'parentHash', 'miner', 'nonce', 'timestamp',
'difficulty', 'totalDifficulty', 'gasLimit', 'gasUsed',
'receiptsRoot', 'stateRoot', 'transactionsRoot',
'sha3Uncles', 'size'
#, 'extraData', 'logsBloom', 'mixHash'
,'alias' #aux field: alias of the block
,'hasAccountBalanceInfo'
]
self.dfBalColsOrder = ['blockNumber', 'account', 'balEth', 'balFinney', 'balSzabo', 'balance']
def makeRpcRequest(self, method, params, key='result', silent = False):
headers = {"content-type": "application/json"}
payload = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": 0
}
res = requests.post(
self.url,
data=json.dumps(payload),
headers=headers).json()
if 'error' in res:
if not silent:
print (res['error']['message'])
return None
if key == None:
return res
else:
return res[key]
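# Typical calls, assuming a reachable Ethereum JSON-RPC node at self.url:
# self.makeRpcRequest("eth_blockNumber", []) returns the latest block number as a hex string,
# and self.makeRpcRequest("eth_getBlockByNumber", ["0x1", True]) returns the full block dict.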
def hexToInt(self, _hex):
'''
Converts input to an integer.
Input can be a hexadecimal string or a number.
Ex:
hexToInt(0x1957e2) -> 1660898
hexToInt('0x1957e2') -> 1660898
'''
if isinstance(_hex, str):
return int(_hex, 16)
else:
return int(_hex)
def intToHex(self, _int):
return hex(_int)
def parseBlock(self, bnum, alias=None, getBalanceInfo=0, SAVE_TO_DB = True, printAtEnd = 0):
if alias == None:
alias = ''
contractCount = 0
block = self.makeRpcRequest("eth_getBlockByNumber", [self.intToHex(bnum), True])
transactions = block['transactions'] #the list of transactions returns everything eth_getTransactionByHash would return
########################################## Transactions
dfTrans = pd.DataFrame()
for tran in transactions:
df = pd.DataFrame({'blockNumber': [bnum]})
contractCreated = None #most of the transactions dont crete contracts
if tran['to'] == None: ##Contract creation
contractCount += 1
contract = self.makeRpcRequest("eth_getTransactionReceipt", [tran['hash']])
contractCreated = contract['contractAddress']#Will store the contract created on the Transaction Table as well
dfContract = pd.DataFrame({'blockNumber': [bnum]})
for c in contract: #loop through the "columns" of the contract
if c in('blockHash', 'to', 'logs', 'logsBloom', 'root'): #dont need / empty / dont care(?)
continue
val = contract[c]
if c in ('blockNumber', 'gasUsed', 'transactionIndex', 'cumulativeGasUsed'):
val = self.hexToInt(val)
dfContract[c] = val
dfContract.rename(columns={'from':'creator'}, inplace=True)
dfContract = dfContract[self.dfContractColOrder]
dfContract.to_sql('Contract', self.seqldb.seqlEngine, if_exists='append', index=False) if SAVE_TO_DB else None
#else: #print ('Populate "is to contract" field?') # would need a list of contracts - need to populate the DB in order
df['contractCreated'] = contractCreated
for k in tran:
if k == 'blockHash': #already parsed # do not use "in" because "s" is a property and it is in "blockHash"
continue
val = tran[k]
if k in ('blockNumber', 'transactionIndex', 'value', 'gas', 'gasPrice', 'nonce'):
val = self.hexToInt(val)
if k == 'value':
val = float(val)
df['valEth'] = round(val/10e17, 6)
df['valFinney'] = round(val/10e14, 6)
df['valSzabo'] = round(val/10e11, 6)
df[k] = val
dfTrans = dfTrans.append(df)
transCount = len(dfTrans)
if transCount >0:
dfTrans = dfTrans [self.dfTransColsOrder] #'input', 'r', 's', 'v' are being ignored
dfTrans.to_sql('BlockTransaction', self.seqldb.seqlEngine, if_exists='append', index=False) if SAVE_TO_DB else None
########################################## Accounts' balances per block
uniqueAccounts = 0
if transCount >0:
dfBalances = pd.DataFrame()
accounts = np.unique(#in case acc is on to and from
np.concatenate((
np.unique(dfTrans['from']),
np.unique(dfTrans['to'].dropna()
)), axis=0))
uniqueAccounts = len(accounts)
for a in accounts:
self.seqldb.execute("exec [insertIfNotExistAccAlias] '{}','{}'".format(a, 'other'))
if getBalanceInfo == 1:
for acc in accounts:
df = pd.DataFrame({'blockNumber': [bnum], 'account':[acc]})
balance = self.makeRpcRequest("eth_getBalance", [acc, self.intToHex(bnum)], silent=True)
if balance != None:
balance = float(self.hexToInt(balance))
df['balEth'] = round(balance/10e17, 6)
df['balFinney'] = round(balance/10e14, 6)
df['balSzabo'] = round(balance/10e11, 6)
df['balance'] = balance
dfBalances = dfBalances.append(df)
if len(dfBalances) >0 and len(dfBalances.columns)>2:
dfBalances = dfBalances[self.dfBalColsOrder]
dfBalances.to_sql('AccountBalances', self.seqldb.seqlEngine, if_exists='append', index=False) if SAVE_TO_DB else None
########################################## BLOCK
dfBlock = | pd.DataFrame({'number': [bnum]}) | pandas.DataFrame |
from datetime import date
from dotenv import load_dotenv
from pathlib import Path
from sqlalchemy import create_engine
import bs4 as bs
import ftplib
import gzip
import os
import pandas as pd
import psycopg2
import re
import sys
import time
import urllib.request
import wget
import zipfile
#%%
def getEnv(env):
return os.getenv(env)
load_dotenv()
dados_rf = 'http://172.16.58.3/CNPJ/'
output_files = Path(getEnv('OUTPUT_FILES_PATH'))
extracted_files = Path(getEnv('EXTRACTED_FILES_PATH'))
raw_html = urllib.request.urlopen(dados_rf)
raw_html = raw_html.read()
# Format the page and convert it to a string
page_items = bs.BeautifulSoup(raw_html, 'lxml')
html_str = str(page_items)
# Get the files
Files = []
text = '.zip'
for m in re.finditer(text, html_str):
i_start = m.start()-40
i_end = m.end()
i_loc = html_str[i_start:i_end].find('href=')+6
print(html_str[i_start+i_loc:i_end])
Files.append(html_str[i_start+i_loc:i_end])
print('Files to be downloaded:')
i_f = 0
for f in Files:
i_f += 1
print(str(i_f) + ' - ' + f)
#%%
########################################################################################################################
## DOWNLOAD ############################################################################################################
########################################################################################################################
# Download files
# Create this bar_progress method which is invoked automatically from wget:
def bar_progress(current, total, width=80):
progress_message = "Downloading: %d%% [%d / %d] bytes - " % (current / total * 100, current, total)
# Don't use print() as it will print in new line every time.
sys.stdout.write("\r" + progress_message)
sys.stdout.flush()
#%%
# Download files ################################################################################################################################
i_l = 0
for l in Files:
# Download the files
i_l += 1
print('Downloading file:')
print(str(i_l) + ' - ' + l)
url = dados_rf+l
wget.download(url, out=output_files, bar=bar_progress)
#%%
# Download layout:
Layout = 'https://www.gov.br/receitafederal/pt-br/assuntos/orientacao-tributaria/cadastros/consultas/arquivos/NOVOLAYOUTDOSDADOSABERTOSDOCNPJ.pdf'
print('Downloading layout:')
wget.download(Layout, out=output_files, bar=bar_progress)
####################################################################################################################################################
#%%
# Creating directory to store the extracted files:
if not os.path.exists(extracted_files):
os.mkdir(extracted_files)
# Extracting files:
i_l = 0
for l in Files:
try:
i_l += 1
print('Extracting file:')
print(str(i_l) + ' - ' + l)
with zipfile.ZipFile(output_files / l, 'r') as zip_ref:
zip_ref.extractall(extracted_files)
except:
pass
#%%
########################################################################################################################
## READ AND INSERT DATA #################################################################################################
########################################################################################################################
insert_start = time.time()
# Files:
Items = [name for name in os.listdir(extracted_files) if name.endswith('')]
# Split the files by type:
arquivos_empresa = []
arquivos_estabelecimento = []
arquivos_socios = []
arquivos_simples = []
arquivos_cnae = []
arquivos_moti = []
arquivos_munic = []
arquivos_natju = []
arquivos_pais = []
arquivos_quals = []
for i in range(len(Items)):
if Items[i].find('EMPRE') > -1:
arquivos_empresa.append(Items[i])
elif Items[i].find('ESTABELE') > -1:
arquivos_estabelecimento.append(Items[i])
elif Items[i].find('SOCIO') > -1:
arquivos_socios.append(Items[i])
elif Items[i].find('SIMPLES') > -1:
arquivos_simples.append(Items[i])
elif Items[i].find('CNAE') > -1:
arquivos_cnae.append(Items[i])
elif Items[i].find('MOTI') > -1:
arquivos_moti.append(Items[i])
elif Items[i].find('MUNIC') > -1:
arquivos_munic.append(Items[i])
elif Items[i].find('NATJU') > -1:
arquivos_natju.append(Items[i])
elif Items[i].find('PAIS') > -1:
arquivos_pais.append(Items[i])
elif Items[i].find('QUALS') > -1:
arquivos_quals.append(Items[i])
else:
pass
#%%
# Connect to the database:
# Database connection settings
user=getEnv('DB_USER')
passw=getEnv('DB_PASSWORD')
host=getEnv('DB_HOST')
port=getEnv('DB_PORT')
database=getEnv('DB_NAME')
# Connect:
engine = create_engine('postgresql://'+user+':'+passw+'@'+host+':'+port+'/'+database)
conn = psycopg2.connect('dbname='+database+' '+'user='+user+' '+'host='+host+' '+'password='+passw)
cur = conn.cursor()
#%%
# Company (EMPRESA) files:
empresa_insert_start = time.time()
print("""
#######################
## EMPRESA (company) files:
#######################
""")
# Drop the table before the insert
cur.execute('DROP TABLE IF EXISTS "empresa";')
conn.commit()
for e in range(0, len(arquivos_empresa)):
print('Working on file: '+arquivos_empresa[e]+' [...]')
try:
del empresa
except:
pass
empresa = | pd.DataFrame(columns=[0, 1, 2, 3, 4, 5, 6]) | pandas.DataFrame |
import pandas as pd
from . import DATA_DIR
import itertools
import csv
import numpy as np
from pathlib import Path
pd.options.mode.chained_assignment = None
REGION_MAPPING_FILEPATH = DATA_DIR / "regionmappingH12.csv"
IAM_ELEC_MARKETS = DATA_DIR / "electricity_markets.csv"
IEA_DIESEL_SHARE = DATA_DIR / "diesel_share_oecd.csv"
def extract_biofuel_shares_from_IAM(
model, fp, IAM_region, years, allocate_all_synfuel=False
):
"""
This function extracts biofuel shares from a IAM file provided.
:param fp: file path to IAM file
:type fp: str
:param IAM_region: IAM region for which to extract the biofuel shares
:type IAM_region: str
:param years: the list of years for which to extract biofuel shares
:param allocate_all_synfuel: Temporary workaround. If True, then all synfuel in the transport sector
is allocated to passenger cars.
:return: a dictionary that contains fuel types as keys and lists of fuel shares as values
"""
if model == "remind":
df = pd.read_csv(fp, delimiter=";", index_col=["Region", "Variable", "Unit"]).drop(
columns=["Model", "Scenario"]
)
if model == "image":
df = pd.read_excel(fp, index_col=[2, 3, 4]).drop(columns=["Model", "Scenario"])
df = df.reset_index()
df = df.loc[df["Region"] == IAM_region]
df = df.loc[:, : str(2050)]
df["Variable"] = df["Variable"].str.replace("|", "-")
if model == "remind":
if allocate_all_synfuel:
# get shares of synthetic fuel
df_total = df.loc[df["Variable"] == "FE-Transport-Pass-Road-LDV-Liquids"]
df_total.index = df.loc[df["Variable"] == "SE-Liquids-Hydrogen"].index
share = np.clip(
(
df.loc[df["Variable"] == "SE-Liquids-Hydrogen", "2005":].divide(
df_total.loc[:, "2005":], axis=0
)
).values,
0,
1,
)
var = ["FE-Transport-Liquids-Oil", "FE-Transport-Liquids-Biomass"]
df_liquids = df.loc[df["Variable"].isin(var), :]
df_liquids.iloc[:, 3:] /= df_liquids.iloc[:, 3:].sum(axis=0)
df_liquids.loc[:, "2005":] *= 1 - share
to_append = [IAM_region, "FE-Transport-Liquids-Hydrogen", "EJ/yr"] + share[
0
].tolist()
a_series = pd.Series(to_append, index=df_liquids.columns)
df_liquids = df_liquids.append(a_series, ignore_index=True)
else:
var = [
"FE-Transport-Liquids-Oil",
"FE-Transport-Liquids-Biomass",
"FE-Transport-Liquids-Hydrogen",
]
df_liquids = df.loc[df["Variable"].isin(var), :]
df_liquids.iloc[:, 3:] /= df_liquids.iloc[:, 3:].sum(axis=0)
var = ["FE-Transport-Gases-Non-Biomass", "FE-Transport-Gases-Biomass"]
df_gas = df.loc[df["Variable"].isin(var), :]
df_gas.iloc[:, 3:] /= df_gas.iloc[:, 3:].sum(axis=0)
d_map_fuels = {
"FE-Transport-Liquids-Oil": "liquid - fossil",
"FE-Transport-Liquids-Biomass": "liquid - biomass",
"FE-Transport-Liquids-Hydrogen": "liquid - synfuel",
"FE-Transport-Gases-Non-Biomass": "gas - fossil",
"FE-Transport-Gases-Biomass": "gas - biomass",
}
if model == "image":
var = [
"Final Energy|Transportation|Freight|Liquids|Oil",
"Final Energy-Transportation-Freight-Liquids-Biomass",
]
df_liquids = df.loc[df["Variable"].isin(var), :]
df_liquids.iloc[:, 3:] /= df_liquids.iloc[:, 3:].sum(axis=0)
var = ["Final Energy|Transportation|Freight|Gases"]
df_gas = df.loc[df["Variable"].isin(var), :]
df_gas.iloc[:, 3:] /= df_gas.iloc[:, 3:].sum(axis=0)
d_map_fuels = {
"Final Energy|Transportation|Freight|Liquids|Oil": "liquid - fossil",
"Final Energy-Transportation-Freight-Liquids-Biomass": "liquid - biomass",
"Final Energy|Transportation|Freight|Gases": "gas - fossil",
}
new_df = | pd.concat([df_liquids, df_gas]) | pandas.concat |
import re
import pandas as pd
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher
from spacy.tokens import Span
import networkx as nx
import matplotlib.pyplot as plt
from tqdm import tqdm
| pd.set_option('display.max_colwidth', 200) | pandas.set_option |
import webbrowser
import numpy as np
import pandas as pd
import tax_utils as tut
from tax_calculator import TaxCalculator
class NorwegianTax(TaxCalculator):
"""
to facilitate easy input
add random text to trigger a code push...
"""
def __init__(self, salary=0, birth_year=1978, tax_year=None, gains_from_sale_fondskonto_share_comp=0, gains_from_sale_fondskonto_interest_comp=0, gains_from_sale_of_shares_ask=0, property_taxable_value=0, pension=0, pension_months=12, pension_percent=100, property_sale_proceeds=0, rental_income=0, property_sale_loss=0, bank_deposits=0,
bank_interest_income=0, interest_expenses=0, dividends=0, mutual_fund_dividends=0, gains_from_sale_of_shares=0, mutual_fund_interest_comp_profit=0, mutual_fund_interest_comp_profit_combi_fund=0, mutual_fund_share_comp_profit=0, mutual_fund_share_comp_profit_combi_fund=0, loss_fondskonto_shares=0, loss_fondskonto_interest=0, loss_ask_sale=0,
loss_from_sale_of_shares=0, loss_from_sale_mutual_fund_share_comp=0, loss_from_sale_mutual_fund_share_comp_combi_fund=0, loss_from_sale_mutual_fund_interest_comp=0,
loss_from_sale_mutual_fund_interest_comp_combi_fund=0, mutual_fund_wealth_share_comp=0, mutual_fund_wealth_interest_comp=0, wealth_in_shares=0, wealth_in_unlisted_shares=0, wealth_ask_cash=0, wealth_ask_shares=0, wealth_fondskonto_cash_interest=0, wealth_fondskonto_shares=0, municipality='0402', case_idx=None):
self._salary = salary
self._birth_year = birth_year
if tax_year is None:
tax_year = pd.to_datetime('today').year
tax_url = "https://skatteberegning.app.skatteetaten.no/%d" % tax_year
self._gains_from_sale_fondskonto_share_comp = gains_from_sale_fondskonto_share_comp
self._gains_from_sale_fondskonto_interest_comp = gains_from_sale_fondskonto_interest_comp
self._gains_from_sale_of_shares_ask = gains_from_sale_of_shares_ask
self._property_taxable_value = property_taxable_value
self._pension = pension
self._pension_months = pension_months
self._pension_percent = pension_percent
self._property_sale_proceeds = property_sale_proceeds
self._rental_income = rental_income
self._property_sale_loss = property_sale_loss
self._bank_deposits = bank_deposits
self._bank_interest_income = bank_interest_income
self._interest_expenses = interest_expenses
self._dividends = dividends
self._mutual_fund_dividends = mutual_fund_dividends
self._gains_from_sale_of_shares = gains_from_sale_of_shares
self._mutual_fund_interest_comp_profit = mutual_fund_interest_comp_profit
self._mutual_fund_interest_comp_profit_combi_fund = mutual_fund_interest_comp_profit_combi_fund
self._mutual_fund_share_comp_profit = mutual_fund_share_comp_profit
self._mutual_fund_share_comp_profit_combi_fund = mutual_fund_share_comp_profit_combi_fund
self._loss_fondskonto_shares = loss_fondskonto_shares
self._loss_fondskonto_interest = loss_fondskonto_interest
self._loss_ask_sale = loss_ask_sale
self._loss_from_sale_of_shares = loss_from_sale_of_shares
self._loss_from_sale_mutual_fund_share_comp = loss_from_sale_mutual_fund_share_comp
self._loss_from_sale_mutual_fund_share_comp_combi_fund = loss_from_sale_mutual_fund_share_comp_combi_fund
self._loss_from_sale_mutual_fund_interest_comp = loss_from_sale_mutual_fund_interest_comp
self._loss_from_sale_mutual_fund_interest_comp_combi_fund = loss_from_sale_mutual_fund_interest_comp_combi_fund
self._mutual_fund_wealth_share_comp = mutual_fund_wealth_share_comp
self._mutual_fund_wealth_interest_comp = mutual_fund_wealth_interest_comp
self._wealth_in_shares = wealth_in_shares
self._wealth_in_unlisted_shares = wealth_in_unlisted_shares
self._wealth_ask_cash = wealth_ask_cash
self._wealth_ask_shares = wealth_ask_shares
self._wealth_fondskonto_cash_interest = wealth_fondskonto_cash_interest
self._wealth_fondskonto_shares = wealth_fondskonto_shares
self._municipality = municipality
super().__init__(
jurisdiction='NOR',
tax_year=tax_year,
tax_url=tax_url,
case_idx=case_idx)
@staticmethod
def tax_payable(basis=0, rate=0, limit=0, deduction=0,
apply_rounding=False):
"""
convenient utility function
"""
retval = max(basis - deduction - limit, 0) * rate
if apply_rounding:
return tut.tax_round(retval)
return retval
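    # Worked example with hypothetical numbers (not official rates):
    # tax_payable(basis=600_000, rate=0.22, deduction=50_000)
    #   -> max(600_000 - 50_000 - 0, 0) * 0.22 = 121_000.0
    # The `limit` argument shifts the threshold in the same way `deduction`
    # does, so only the amount above `deduction + limit` is taxed at `rate`.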
@property
def salary(self):
return self._salary
@salary.setter
def salary(self, value):
self._salary = value
@property
def municipality(self):
return self._municipality
@municipality.setter
def municipality(self, value):
self._municipality = value
@property
def birth_year(self):
return self._birth_year
@birth_year.setter
def birth_year(self, value):
self._birth_year = value
@property
def gains_from_sale_fondskonto_share_comp(self):
return self._gains_from_sale_fondskonto_share_comp
@gains_from_sale_fondskonto_share_comp.setter
def gains_from_sale_fondskonto_share_comp(self, value):
self._gains_from_sale_fondskonto_share_comp = value
@property
def gains_from_sale_fondskonto_interest_comp(self):
return self._gains_from_sale_fondskonto_interest_comp
@gains_from_sale_fondskonto_interest_comp.setter
def gains_from_sale_fondskonto_interest_comp(self, value):
self._gains_from_sale_fondskonto_interest_comp = value
@property
def gains_from_sale_of_shares_ask(self):
return self._gains_from_sale_of_shares_ask
@gains_from_sale_of_shares_ask.setter
def gains_from_sale_of_shares_ask(self, value):
self._gains_from_sale_of_shares_ask = value
@property
def property_taxable_value(self):
return self._property_taxable_value
@property_taxable_value.setter
def property_taxable_value(self, value):
self._property_taxable_value = value
@property
def pension(self):
return self._pension
@pension.setter
def pension(self, value):
self._pension = value
@property
def pension_months(self):
return self._pension_months
@pension_months.setter
def pension_months(self, value):
self._pension_months = value
@property
def pension_percent(self):
return self._pension_percent
@pension_percent.setter
def pension_percent(self, value):
self._pension_percent = value
@property
def property_sale_proceeds(self):
return self._property_sale_proceeds
@property_sale_proceeds.setter
def property_sale_proceeds(self, value):
self._property_sale_proceeds = value
@property
def rental_income(self):
return self._rental_income
@rental_income.setter
def rental_income(self, value):
self._rental_income = value
@property
def property_sale_loss(self):
return self._property_sale_loss
@property_sale_loss.setter
def property_sale_loss(self, value):
self._property_sale_loss = value
@property
def bank_deposits(self):
return self._bank_deposits
@bank_deposits.setter
def bank_deposits(self, value):
self._bank_deposits = value
@property
def bank_interest_income(self):
return self._bank_interest_income
@bank_interest_income.setter
def bank_interest_income(self, value):
self._bank_interest_income = value
@property
def interest_expenses(self):
return self._interest_expenses
@interest_expenses.setter
def interest_expenses(self, value):
self._interest_expenses = value
@property
def dividends(self):
return self._dividends
@dividends.setter
def dividends(self, value):
self._dividends = value
@property
def mutual_fund_dividends(self):
return self._mutual_fund_dividends
@mutual_fund_dividends.setter
def mutual_fund_dividends(self, value):
self._mutual_fund_dividends = value
@property
def gains_from_sale_of_shares(self):
return self._gains_from_sale_of_shares
@gains_from_sale_of_shares.setter
def gains_from_sale_of_shares(self, value):
self._gains_from_sale_of_shares = value
@property
def mutual_fund_interest_comp_profit(self):
return self._mutual_fund_interest_comp_profit
@mutual_fund_interest_comp_profit.setter
def mutual_fund_interest_comp_profit(self, value):
self._mutual_fund_interest_comp_profit = value
@property
def mutual_fund_interest_comp_profit_combi_fund(self):
return self._mutual_fund_interest_comp_profit_combi_fund
@mutual_fund_interest_comp_profit_combi_fund.setter
def mutual_fund_interest_comp_profit_combi_fund(self, value):
self._mutual_fund_interest_comp_profit_combi_fund = value
@property
def mutual_fund_share_comp_profit(self):
return self._mutual_fund_share_comp_profit
@mutual_fund_share_comp_profit.setter
def mutual_fund_share_comp_profit(self, value):
self._mutual_fund_share_comp_profit = value
@property
def mutual_fund_share_comp_profit_combi_fund(self):
return self._mutual_fund_share_comp_profit_combi_fund
@mutual_fund_share_comp_profit_combi_fund.setter
def mutual_fund_share_comp_profit_combi_fund(self, value):
self._mutual_fund_share_comp_profit_combi_fund = value
@property
def loss_fondskonto_shares(self):
return self._loss_fondskonto_shares
@loss_fondskonto_shares.setter
def loss_fondskonto_shares(self, value):
self._loss_fondskonto_shares = value
@property
def loss_fondskonto_interest(self):
return self._loss_fondskonto_interest
@loss_fondskonto_interest.setter
def loss_fondskonto_interest(self, value):
self._loss_fondskonto_interest = value
@property
def loss_ask_sale(self):
return self._loss_ask_sale
@loss_ask_sale.setter
def loss_ask_sale(self, value):
self._loss_ask_sale = value
@property
def loss_from_sale_of_shares(self):
return self._loss_from_sale_of_shares
@loss_from_sale_of_shares.setter
def loss_from_sale_of_shares(self, value):
self._loss_from_sale_of_shares = value
@property
def loss_from_sale_mutual_fund_share_comp(self):
return self._loss_from_sale_mutual_fund_share_comp
@loss_from_sale_mutual_fund_share_comp.setter
def loss_from_sale_mutual_fund_share_comp(self, value):
self._loss_from_sale_mutual_fund_share_comp = value
@property
def loss_from_sale_mutual_fund_share_comp_combi_fund(self):
return self._loss_from_sale_mutual_fund_share_comp_combi_fund
@loss_from_sale_mutual_fund_share_comp_combi_fund.setter
def loss_from_sale_mutual_fund_share_comp_combi_fund(self, value):
self._loss_from_sale_mutual_fund_share_comp_combi_fund = value
@property
def loss_from_sale_mutual_fund_interest_comp(self):
return self._loss_from_sale_mutual_fund_interest_comp
@loss_from_sale_mutual_fund_interest_comp.setter
def loss_from_sale_mutual_fund_interest_comp(self, value):
self._loss_from_sale_mutual_fund_interest_comp = value
@property
def loss_from_sale_mutual_fund_interest_comp_combi_fund(self):
return self._loss_from_sale_mutual_fund_interest_comp_combi_fund
@loss_from_sale_mutual_fund_interest_comp_combi_fund.setter
def loss_from_sale_mutual_fund_interest_comp_combi_fund(self, value):
self._loss_from_sale_mutual_fund_interest_comp_combi_fund = value
@property
def mutual_fund_wealth_share_comp(self):
return self._mutual_fund_wealth_share_comp
@mutual_fund_wealth_share_comp.setter
def mutual_fund_wealth_share_comp(self, value):
self._mutual_fund_wealth_share_comp = value
@property
def mutual_fund_wealth_interest_comp(self):
return self._mutual_fund_wealth_interest_comp
@mutual_fund_wealth_interest_comp.setter
def mutual_fund_wealth_interest_comp(self, value):
self._mutual_fund_wealth_interest_comp = value
@property
def wealth_in_shares(self):
return self._wealth_in_shares
@wealth_in_shares.setter
def wealth_in_shares(self, value):
self._wealth_in_shares = value
@property
def wealth_in_unlisted_shares(self):
return self._wealth_in_unlisted_shares
@wealth_in_unlisted_shares.setter
def wealth_in_unlisted_shares(self, value):
self._wealth_in_unlisted_shares = value
@property
def wealth_ask_cash(self):
return self._wealth_ask_cash
@wealth_ask_cash.setter
def wealth_ask_cash(self, value):
self._wealth_ask_cash = value
@property
def wealth_ask_shares(self):
return self._wealth_ask_shares
@wealth_ask_shares.setter
def wealth_ask_shares(self, value):
self._wealth_ask_shares = value
@property
def wealth_fondskonto_cash_interest(self):
return self._wealth_fondskonto_cash_interest
@wealth_fondskonto_cash_interest.setter
def wealth_fondskonto_cash_interest(self, value):
self._wealth_fondskonto_cash_interest = value
@property
def wealth_fondskonto_shares(self):
return self._wealth_fondskonto_shares
@wealth_fondskonto_shares.setter
def wealth_fondskonto_shares(self, value):
self._wealth_fondskonto_shares = value
@property
def share_related_income(self):
return self.gains_from_sale_fondskonto_share_comp + self.dividends + self.mutual_fund_dividends + self.gains_from_sale_of_shares + self.gains_from_sale_of_shares_ask + self.mutual_fund_share_comp_profit + \
self.mutual_fund_share_comp_profit_combi_fund - self.loss_fondskonto_shares - self.loss_from_sale_of_shares - \
self.loss_from_sale_mutual_fund_share_comp - \
self.loss_from_sale_mutual_fund_share_comp_combi_fund - self.loss_ask_sale
@property
def interest_related_income(self):
return self.gains_from_sale_fondskonto_interest_comp - self.interest_expenses + self.mutual_fund_interest_comp_profit + self.mutual_fund_interest_comp_profit_combi_fund + \
self.bank_interest_income - self.loss_fondskonto_interest - self.loss_from_sale_mutual_fund_interest_comp - \
self.loss_from_sale_mutual_fund_interest_comp_combi_fund
@property
def property_related_income(self):
return self.rental_income + self.property_sale_proceeds - self.property_sale_loss
@property
def non_pension_income(self):
return self.salary + self.share_related_income + \
self.interest_related_income + self.property_related_income
@property
def income_tax_basis(self):
if abs(self.non_pension_income) < 1e-4:
return max(self.pension - self.pension_only_minimum_deduction, 0)
if abs(self.pension) < 1e-4:
return max(self.salary - self.salary_only_minimum_deduction + self.parameter('share_income_grossup')
* self.share_related_income + self.interest_related_income + self.property_related_income, 0)
return max(self.salary + self.pension - self.pension_and_income_minimum_deduction + self.parameter('share_income_grossup')
* self.share_related_income + self.interest_related_income + self.property_related_income, 0)
@property
def state_wealth_tax_basis(self):
return (self.mutual_fund_wealth_share_comp + self.wealth_in_shares + self.wealth_in_unlisted_shares + self.wealth_ask_shares + self.wealth_fondskonto_shares) * \
self.parameter('percentage_of_taxable_wealth_cap_shares') + self.bank_deposits + self.property_taxable_value + \
self.mutual_fund_wealth_interest_comp + \
self.wealth_fondskonto_cash_interest + self.wealth_ask_cash
@property
def pension_deduction_raw(self):
return max(min(self.pension * self.parameter('pension_deduction_multiplier'),
self.parameter('max_pension_deduction')), self.parameter('min_pension_deduction'))
@property
def income_deduction_raw(self):
return max(min(self.salary * self.parameter('deduction_multiplier'),
self.parameter('max_deduction_limit')), self.parameter('min_deduction_limit'))
@property
def pension_and_income_minimum_deduction(self):
"""
does what it says
"""
if (abs(self.pension) < 1e-4) and (abs(self.salary) < 1e-4):
return 0
# you can't deduct more than what you earn:
income_deduction = min(self.income_deduction_raw, self.salary)
combo_deduction = self.pension_deduction_raw + \
max(min(self.salary * self.parameter('deduction_multiplier'), self.parameter('max_deduction_limit')),
min(self.parameter('min_pension_deduction'), self.salary, self.pension))
return min(max(income_deduction, combo_deduction),
self.parameter('max_deduction_limit'))
@property
def salary_only_minimum_deduction(self):
"""
this could be read from db, of course
https://www.skatteetaten.no/en/rates/minimum-standard-deduction/
"""
return int(min(self.income_deduction_raw, self.salary))
@property
def pension_only_minimum_deduction(self):
"""
does what it says
"""
return min(self.pension_deduction_raw, self.pension)
@property
def bracket_tax(self):
"""
Calculates the bracket tax
"""
tot_inc = self.salary + self.pension
if tot_inc <= self.parameter('trinnskatt_l1'):
return 0
if self.parameter('trinnskatt_l1') < tot_inc <= self.parameter(
'trinnskatt_l2'):
return tut.tax_round(self.parameter(
'trinnskatt_r1') * (tot_inc - self.parameter('trinnskatt_l1')))
if self.parameter('trinnskatt_l2') < tot_inc <= self.parameter(
'trinnskatt_l3'):
return tut.tax_round(self.parameter('trinnskatt_r2') * (tot_inc - self.parameter('trinnskatt_l2')) +
self.parameter('trinnskatt_r1') * (self.parameter('trinnskatt_l2') - self.parameter('trinnskatt_l1')))
if self.parameter('trinnskatt_l3') < tot_inc <= self.parameter(
'trinnskatt_l4'):
return tut.tax_round(self.parameter('trinnskatt_r3') * (tot_inc - self.parameter('trinnskatt_l3')) + self.parameter('trinnskatt_r2') *
(self.parameter('trinnskatt_l3') - self.parameter('trinnskatt_l2')) + self.parameter('trinnskatt_r1') * (self.parameter('trinnskatt_l2') - self.parameter('trinnskatt_l1')))
return tut.tax_round(self.parameter('trinnskatt_r4') * (tot_inc - self.parameter('trinnskatt_l4')) + self.parameter('trinnskatt_r3') * (self.parameter('trinnskatt_l4') -
self.parameter('trinnskatt_l3')) + self.parameter('trinnskatt_r2') * (self.parameter('trinnskatt_l3') - self.parameter('trinnskatt_l2')) + self.parameter('trinnskatt_r1') * (self.parameter('trinnskatt_l2') - self.parameter('trinnskatt_l1')))
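    # Illustration of the piecewise bracket (trinnskatt) logic, using
    # hypothetical parameters l1=200_000, l2=300_000, r1=1.7%, r2=4.0%:
    # an income of 250_000 pays 1.7% of the 50_000 above l1 = 850, while an
    # income of 350_000 (still below l3) pays 4.0% of the 50_000 above l2
    # (2_000) plus 1.7% of the full 100_000 band between l1 and l2 (1_700).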
@property
def age(self):
return pd.to_datetime('today').year - self.birth_year
@property
def bracket_tax_level(self):
"""
for debugging the excel sheet
"""
tot_inc = self.salary + self.pension
if tot_inc <= self.parameter('trinnskatt_l1'):
return "Below level 1"
if self.parameter('trinnskatt_l1') < tot_inc <= self.parameter(
'trinnskatt_l2'):
return "Between level 1 and 2"
if self.parameter('trinnskatt_l2') < tot_inc <= self.parameter(
'trinnskatt_l3'):
return "Between lebel 2 and 3"
if self.parameter('trinnskatt_l3') < tot_inc <= self.parameter(
'trinnskatt_l4'):
return "Between level 3 and 4"
return "Above level 4"
@property
def attribute_map(self):
karta = {'salary': '2.1.1', 'pension': '2.2.1', 'bank_interest_income': '3.1.1',
'interest_expenses': '3.3.1', 'property_taxable_value': '4.3.2', 'property_sale_proceeds': '2.8.4',
'property_sale_loss': '3.3.6', 'rental_income': '2.8.2', 'bank_deposits': '4.1.1',
'gains_from_sale_fondskonto_share_comp': '3.1.4', 'gains_from_sale_fondskonto_interest_comp': '3.1.4',
'dividends': '3.1.5', 'mutual_fund_dividends': '3.1.6', 'gains_from_sale_of_shares': '3.1.8',
'gains_from_sale_of_shares_ask': '3.1.8'}
for field in ['mutual_fund_interest_comp_profit', 'mutual_fund_interest_comp_profit_combi_fund',
'mutual_fund_share_comp_profit', 'mutual_fund_share_comp_profit_combi_fund']:
karta[field] = '3.1.9'
for field in ['loss_fondskonto_shares', 'loss_fondskonto_interest']:
karta[field] = '3.3.7'
for field in ['loss_ask_sale', 'loss_from_sale_of_shares']:
karta[field] = '3.3.8'
for field in ['loss_from_sale_mutual_fund_share_comp', 'loss_from_sale_mutual_fund_share_comp_combi_fund',
'loss_from_sale_mutual_fund_interest_comp', 'loss_from_sale_mutual_fund_interest_comp_combi_fund']:
karta[field] = '3.3.9'
karta['mutual_fund_wealth_share_comp'] = '4.1.4'
karta['mutual_fund_wealth_interest_comp'] = '4.1.5'
karta['wealth_in_shares'] = '4.1.7'
for field in ['wealth_in_unlisted_shares',
'wealth_ask_cash', 'wealth_ask_shares']:
karta[field] = '4.1.8'
for field in ['wealth_fondskonto_cash_interest',
'wealth_fondskonto_shares']:
karta[field] = '4.5.2'
return karta
def compare_calculated_tax_vs_correct_tax(
self, atol=1e-8, rtol=1e-6, check_basis=False):
"""
compares the config vs our calculations
It gives a more detailed breakdown so you can compare the components such as different basis etc.
"""
df_calc = self.tax_breakdown()
df_true = self.parsed_official_response()
out = []
elements = [['Formueskatt stat', 'formueskattTilStat'], ['Formueskatt kommune', 'formueskattTilKommune'],
['Inntektsskatt til kommune', 'inntektsskattTilKommune'], [
'Inntektsskatt til fylkeskommune', 'inntektsskattTilFylkeskommune'],
['Fellesskatt', 'fellesskatt'], ['Trinnskatt', 'trinnskatt'], ['Trygdeavgift', 'sumTrygdeavgift'], ['Sum skattefradrag', 'Sum skattefradrag']]
for calc_comp, correct_comp in elements:
tax_calc = df_calc.query("Skatt == '%s'" % calc_comp)
tax_calc_basis = tax_calc['Grunnlag'].item()
tax_calc_value = tut.tax_round(tax_calc['Beloep'].item())
# pdb.set_trace()
# if 'Inntekt' in calc_comp:
# pdb.set_trace()
tax_correct = df_true.query("tax_type == '%s'" % correct_comp)
if tax_correct.empty:
tax_correct_basis = 0
else:
tax_correct_basis = tax_correct['tax_basis'].item()
if 'skattefradrag' in calc_comp:
tax_calc_value *= -1
if not tax_correct.empty:
tax_correct_value = tax_correct['tax'].item()
else:
tax_correct_value = 0
error_basis = np.abs(tax_calc_basis - tax_correct_basis)
tol_basis = atol + rtol * np.abs(tax_correct_basis)
error_value = np.abs(tax_calc_value - tax_correct_value)
tol_value = atol + rtol * np.abs(tax_correct_value)
basis_pass = (error_basis <= tol_basis)
value_pass = (error_value <= tol_value)
if check_basis:
test_string = ''
if basis_pass and value_pass:
test_string = '++'
elif basis_pass and not value_pass:
test_string = '+-'
elif value_pass and not basis_pass:
test_string = '-+'
else:
test_string = '--'
else:
test_string = '+' if value_pass else '-'
if check_basis:
out.append([calc_comp,
tax_calc_basis,
tax_correct_basis,
tax_calc_value,
tax_correct_value,
error_basis,
error_value,
tol_basis,
tol_value,
test_string])
else:
out.append([calc_comp, tax_calc_value, tax_correct_value,
error_value, tol_value, test_string])
# check total tax:
# pdb.set_trace()
total_calculated_tax = df_calc.query(
"Skatt == 'Din Skatt'").Beloep.item()
total_tax = df_true.query("tax_type == 'total'").tax.item()
error_value = np.abs(total_calculated_tax - total_tax)
tol_value = atol + rtol * np.abs(total_tax)
basis_pass = True
value_pass = (error_value <= tol_value)
if check_basis:
            test_string = '+' + ('+' if value_pass else '-')  # basis always passes for the total
out.append(['Total skatt', np.nan, np.nan, total_calculated_tax,
total_tax, 0, error_value, 0, tol_value, test_string])
else:
test_string = '+' if value_pass else '-'
out.append(['Total skatt', total_calculated_tax,
total_tax, error_value, tol_value, test_string])
if check_basis:
return pd.DataFrame(out, columns=['component', 'basis_calc', 'basis_corr', 'value_calc',
'value_corr', 'basis_error', 'value_error', 'basis_tol', 'value_tol', 'test_pass'])
return pd.DataFrame(out, columns=[
'component', 'value_calc', 'value_corr', 'value_error', 'value_tol', 'test_pass'])
def state_wealth_tax(self):
return self.tax_payable(basis=self.state_wealth_tax_basis, rate=self.parameter(
'state_wealth_tax_rate'), limit=self.parameter('wealth_tax_lower_limit'))
def municipal_wealth_tax(self):
return self.tax_payable(basis=self.state_wealth_tax_basis, rate=self.parameter(
'municipal_wealth_tax_rate'), limit=self.parameter('wealth_tax_lower_limit'))
def explain_attribute(self, attr='pension'):
assert attr in self.attribute_map, "'%s' is not a valid attribute!" % attr
return self.explain_tax_item(self.attribute_map[attr])
def explain_tax_item(self, item_no='3.1.8'):
"""
just show the web-page for the item
"""
text = item_no.replace('.', '/')
url = "https://www.skatteetaten.no/person/skatt/skattemelding/finn-post/%s" % text
return webbrowser.open(url)
def _income_tax(self, basis_name='felles', apply_rounding=True):
"""
some gentle indirection
"""
if basis_name == 'felles':
return self.tax_payable(basis=self.income_tax_basis, rate=self.parameter('felles_tax_rate'),
deduction=self.parameter('personal_deduction'), apply_rounding=apply_rounding)
if basis_name == 'fylke':
return self.tax_payable(basis=self.income_tax_basis, rate=self.parameter('fylke_tax_rate'),
deduction=self.parameter('personal_deduction'), apply_rounding=apply_rounding)
if basis_name == 'kommun':
return self.tax_payable(basis=self.income_tax_basis, rate=self.parameter('municipal_tax_rate'),
deduction=self.parameter('personal_deduction'), apply_rounding=apply_rounding)
        raise Exception(
            "Basis must be one of {felles, fylke, kommun}, not '%s'" %
            basis_name)
def municipal_income_tax(self):
return self._income_tax(basis_name='kommun')
def common_income_tax(self):
return self._income_tax(basis_name='felles')
def county_income_tax(self):
return self._income_tax(basis_name='fylke')
def national_insurance(self, apply_rounding=False):
"""
[NO]
        The contribution appears to be 8.2% of the full salary once income exceeds ~81.4k;
        below that it ramps up from the lower limit, because the contribution is capped at
        25% of the income above the lower limit (the cap is not mentioned on the official site).
https://no.wikipedia.org/wiki/Trygdeavgift
"""
rate = self.parameter('trygde_rate')
if self.age > self.parameter('trygde_rate_cutoff_age_hi') or self.age < self.parameter(
'trygde_rate_cutoff_age_lo'):
rate = self.parameter('trygde_rate_extreme')
income = self.salary + self.pension
if income <= self.parameter('trygde_income_limit'):
return 0
overshooting_income = income - self.parameter('trygde_income_limit')
# share_of_overshooting =
raw_tax = rate * self.salary + \
self.parameter('trygde_rate_pension') * self.pension
if raw_tax >= self.parameter('trygde_max_share') * overshooting_income:
ans = self.parameter('trygde_max_share') * overshooting_income
if apply_rounding:
return tut.tax_round(ans)
return ans
if apply_rounding:
return tut.tax_round(raw_tax)
return raw_tax
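    # Example of the 25% cap with hypothetical parameters (limit=64_650,
    # rate=8.2%, max_share=25%): a salary of 70_000 gives raw_tax = 5_740,
    # but the cap is 0.25 * (70_000 - 64_650) = 1_337.5, so the capped
    # amount is returned instead of the raw 8.2% contribution.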
def deduction(self):
if self.pension > 0:
max_ded = self.parameter(
'max_pension_tax_deduction') * (self.pension_percent / 100) * (self.pension_months / 12)
# pdb.set_trace()
reduction_stage1 = self.parameter(
'pension_deduction_stage_one_threshold') * (self.pension_percent / 100) * (self.pension_months / 12)
red_rate1 = self.parameter(
'stage_one_reduction_rate')
reduction_stage2 = self.parameter(
'pension_deduction_stage_two_threshold') * (self.pension_percent / 100) * (self.pension_months / 12)
red_rate2 = self.parameter(
'stage_two_reduction_rate')
            reduction = (min(self.pension, reduction_stage2) - reduction_stage1) * red_rate1 + \
                max((self.pension - reduction_stage2) * red_rate2, 0)
            cutoff = min(np.round(max_ded - reduction, 0), max_ded)
deductions = self.municipal_income_tax() + self.county_income_tax() + \
self.common_income_tax() + self.bracket_tax + self.national_insurance()
return max(min(cutoff, deductions), 0)
return 0
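    # Sketch of the staged pension deduction with hypothetical parameters
    # (full pension, 12 months): max_ded=30_000, stage-1 threshold 200_000
    # at 15.3%, stage-2 threshold 300_000 at 6%. A pension of 250_000
    # reduces the ceiling by (250_000 - 200_000) * 0.153 = 7_650, leaving
    # min(30_000 - 7_650, 30_000) = 22_350 as the deduction cap, which is
    # then limited to the income taxes actually payable.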
def tax(self, apply_rounding=True):
if apply_rounding:
return tut.tax_round(self.state_wealth_tax()) + tut.tax_round(self.municipal_wealth_tax()) + tut.tax_round(self.municipal_income_tax()) + tut.tax_round(
self.county_income_tax()) + tut.tax_round(self.common_income_tax()) + tut.tax_round(self.bracket_tax) + tut.tax_round(self.national_insurance()) - tut.tax_round(self.deduction())
return self.state_wealth_tax() + self.municipal_wealth_tax() + self.municipal_income_tax() + self.county_income_tax() + \
self.common_income_tax() + self.bracket_tax + \
self.national_insurance() - self.deduction()
def tax_breakdown(self):
out = [['Formueskatt stat', self.state_wealth_tax_basis, self.state_wealth_tax()], [
'Formueskatt kommune', self.state_wealth_tax_basis, self.municipal_wealth_tax()]]
out += [['Inntektsskatt til kommune', self.income_tax_basis, self.municipal_income_tax()], ['Inntektsskatt til fylkeskommune', self.income_tax_basis, self.county_income_tax()],
['Fellesskatt', self.income_tax_basis, self.common_income_tax()], ['Trinnskatt', self.salary + self.pension, self.bracket_tax], ['Trygdeavgift', self.salary + self.pension, self.national_insurance()]]
out += [['Sum skattefradrag', 0, self.deduction()]]
# if apply_rounding:
out += [['Din Skatt', np.nan, self.tax()]]
return | pd.DataFrame(out, columns=['Skatt', 'Grunnlag', 'Beloep']) | pandas.DataFrame |
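A minimal usage sketch of the NorwegianTax class defined above, assuming the supporting tax_utils / TaxCalculator modules and their parameter tables are importable; the input figures and municipality code are hypothetical:

if __name__ == "__main__":
    calc = NorwegianTax(salary=550_000, birth_year=1978,
                        bank_deposits=200_000, bank_interest_income=1_500,
                        interest_expenses=12_000, municipality='0301')
    print("Bracket tax:", calc.bracket_tax)
    print("National insurance:", calc.national_insurance())
    print("Total tax:", calc.tax())
    print(calc.tax_breakdown())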
from __future__ import print_function
import six
import unittest
import numpy as np
from unittest import mock
import pandas as pd
from dataprofiler.profilers import column_profile_compilers as \
col_pro_compilers
from dataprofiler.profilers.profiler_options import BaseOption,\
StructuredOptions, UnstructuredOptions
class TestBaseProfileCompilerClass(unittest.TestCase):
def test_cannot_instantiate(self):
"""showing we normally can't instantiate an abstract class"""
with self.assertRaises(TypeError) as e:
col_pro_compilers.BaseCompiler()
self.assertRegex(
str(e.exception),
"Can't instantiate abstract class BaseCompiler with "
"abstract methods? profile"
)
@mock.patch.multiple(
col_pro_compilers.BaseCompiler, __abstractmethods__=set(),
_profilers=[mock.Mock()], _option_class=mock.Mock(spec=BaseOption))
@mock.patch.multiple(
col_pro_compilers.ColumnStatsProfileCompiler, _profilers=[mock.Mock()])
def test_add_profilers(self):
compiler1 = col_pro_compilers.BaseCompiler(mock.Mock())
compiler2 = col_pro_compilers.BaseCompiler(mock.Mock())
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`BaseCompiler` and `int` are '
'not of the same profile compiler type.'):
compiler1 + 3
compiler3 = col_pro_compilers.ColumnStatsProfileCompiler(mock.Mock())
compiler3._profiles = [mock.Mock()]
with self.assertRaisesRegex(TypeError,
'`BaseCompiler` and '
'`ColumnStatsProfileCompiler` are '
'not of the same profile compiler type.'):
compiler1 + compiler3
# test mismatched names
compiler1.name = 'compiler1'
compiler2.name = 'compiler2'
with self.assertRaisesRegex(ValueError,
'Column profile names are unmatched: '
'compiler1 != compiler2'):
compiler1 + compiler2
# test mismatched profiles due to options
compiler2.name = 'compiler1'
compiler1._profiles = dict(test1=mock.Mock())
compiler2._profiles = dict(test2=mock.Mock())
with self.assertRaisesRegex(ValueError,
'Column profilers were not setup with the '
'same options, hence they do not calculate '
'the same profiles and cannot be added '
'together.'):
compiler1 + compiler2
# test success
compiler1._profiles = dict(test=1)
compiler2._profiles = dict(test=2)
merged_compiler = compiler1 + compiler2
self.assertEqual(3, merged_compiler._profiles['test'])
self.assertEqual('compiler1', merged_compiler.name)
def test_diff_primitive_compilers(self):
# Test different data types
data1 = pd.Series(['-2', '-1', '1', '2'])
data2 = pd.Series(["YO YO YO", "HELLO"])
compiler1 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data1)
compiler2 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data2)
expected_diff = {
'data_type_representation': {
'datetime': 'unchanged',
'int': 1.0,
'float': 1.0,
'text': 'unchanged'
},
'data_type': ['int', 'text']
}
self.assertDictEqual(expected_diff, compiler1.diff(compiler2))
# Test different data types with datetime specifically
data1 = pd.Series(['-2', '-1', '1', '2'])
data2 = pd.Series(["01/12/1967", "11/9/2024"])
compiler1 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data1)
compiler2 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data2)
expected_diff = {
'data_type_representation': {
'datetime': -1.0,
'int': 1.0,
'float': 1.0,
'text': 'unchanged'
},
'data_type': ['int', 'datetime']
}
self.assertDictEqual(expected_diff, compiler1.diff(compiler2))
# Test same data types
data1 = | pd.Series(['-2', '15', '1', '2']) | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns,
[('z', 'numeric'), ('a', 'categorical'),
('ch', 'categorical')])
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
count = 0
for header in headers:
index = pd.Index(['id1', 'id2'], name=header)
df = pd.DataFrame({'column': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_header, header)
count += 1
# Since this test case is a little complicated, make sure that the
        # expected number of comparisons is actually performed.
self.assertEqual(count, 26)
def test_recommended_ids(self):
index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
df = pd.DataFrame({'col1': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 2)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids,
('c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'))
self.assertEqualColumns(md.columns, [('col1', 'categorical')])
def test_non_standard_characters(self):
index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"', 'col\t \r\n5']
data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 5)
self.assertEqual(md.column_count, 5)
self.assertEqual(md.id_header, 'id')
self.assertEqual(
md.ids, ('©id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
self.assertEqualColumns(md.columns, [('↩c@l1™', 'categorical'),
('col(#2)', 'categorical'),
("#col'3", 'categorical'),
('"<col_4>"', 'categorical'),
('col\t \r\n5', 'numeric')])
def test_missing_data(self):
index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 4)
self.assertEqual(md.column_count, 4)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
self.assertEqualColumns(md.columns, [('col1', 'numeric'),
('NA', 'numeric'),
('col3', 'categorical'),
('col4', 'categorical')])
def test_does_not_cast_ids_or_column_names(self):
index = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object,
name='id')
columns = ['42.0', '1000', '-4.2']
data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('0.000001', '0.004000', '0.000000'))
self.assertEqualColumns(md.columns, [('42.0', 'numeric'),
('1000', 'categorical'),
('-4.2', 'numeric')])
def test_mixed_column_types(self):
md = Metadata(
pd.DataFrame({'col0': [1.0, 2.0, 3.0],
'col1': ['a', 'b', 'c'],
'col2': ['foo', 'bar', '42'],
'col3': ['1.0', '2.5', '-4.002'],
'col4': [1, 2, 3],
'col5': [1, 2, 3.5],
'col6': [1e-4, -0.0002, np.nan],
'col7': ['cat', np.nan, 'dog'],
'col8': ['a', 'a', 'a'],
'col9': [0, 0, 0]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 10)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns, [('col0', 'numeric'),
('col1', 'categorical'),
('col2', 'categorical'),
('col3', 'categorical'),
('col4', 'numeric'),
('col5', 'numeric'),
('col6', 'numeric'),
('col7', 'categorical'),
('col8', 'categorical'),
('col9', 'numeric')])
def test_case_insensitive_duplicate_ids(self):
index = pd.Index(['a', 'b', 'A'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3']}, index=index)
metadata = Metadata(df)
self.assertEqual(metadata.ids, ('a', 'b', 'A'))
def test_case_insensitive_duplicate_column_names(self):
index = pd.Index(['a', 'b', 'c'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3'],
'Column': ['4', '5', '6']}, index=index)
metadata = Metadata(df)
self.assertEqual(set(metadata.columns), {'column', 'Column'})
def test_categorical_column_leading_trailing_whitespace_value(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', ' bar ', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_id(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', ' b ', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_column_name(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], ' col2 ': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
class TestSourceArtifacts(unittest.TestCase):
def setUp(self):
self.md = Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_source_artifacts(self):
self.assertEqual(self.md.artifacts, ())
def test_add_zero_artifacts(self):
self.md._add_artifacts([])
self.assertEqual(self.md.artifacts, ())
def test_add_artifacts(self):
# First two artifacts have the same data but different UUIDs.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
self.md._add_artifacts([artifact1])
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact3 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact2, artifact3])
self.assertEqual(self.md.artifacts, (artifact1, artifact2, artifact3))
def test_add_non_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
with self.assertRaisesRegex(TypeError, "Artifact object.*42"):
self.md._add_artifacts([artifact, 42])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, ())
def test_add_duplicate_artifact(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact2 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact1, artifact2])
with self.assertRaisesRegex(
ValueError, "Duplicate source artifacts.*artifact: Mapping"):
self.md._add_artifacts([artifact1])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, (artifact1, artifact2))
class TestRepr(unittest.TestCase):
def test_singular(self):
md = Metadata(pd.DataFrame({'col1': [42]},
index=pd.Index(['a'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 1 column', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
def test_plural(self):
md = Metadata(pd.DataFrame({'col1': [42, 42], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('2 IDs x 2 columns', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
self.assertIn("col2: ColumnProperties(type='categorical')", obs)
def test_column_name_padding(self):
data = [[0, 42, 'foo']]
index = pd.Index(['my-id'], name='id')
columns = ['col1', 'longer-column-name', 'c']
md = Metadata(pd.DataFrame(data, index=index, columns=columns))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 3 columns', obs)
self.assertIn(
"col1: ColumnProperties(type='numeric')", obs)
self.assertIn(
"longer-column-name: ColumnProperties(type='numeric')", obs)
self.assertIn(
"c: ColumnProperties(type='categorical')", obs)
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
mdc = md.get_column('col1')
self.assertIsInstance(md, Metadata)
self.assertIsInstance(mdc, NumericMetadataColumn)
self.assertReallyNotEqual(md, mdc)
def test_id_header_mismatch(self):
data = {'col1': ['foo', 'bar'], 'col2': [42, 43]}
md1 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='ID')))
self.assertReallyNotEqual(md1, md2)
def test_source_mismatch(self):
# Metadata created from an artifact vs not shouldn't compare equal,
# even if the data is the same.
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = artifact.view(Metadata)
md_no_artifact = Metadata(md_from_artifact.to_dataframe())
pd.testing.assert_frame_equal(md_from_artifact.to_dataframe(),
md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
# Metadata created from different artifacts shouldn't compare equal,
# even if the data is the same.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact1.view(Metadata)
md2 = artifact2.view(Metadata)
pd.testing.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_id_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['1'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_name_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'c': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_type_mismatch(self):
md1 = Metadata(pd.DataFrame({'col1': ['42', '43']},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [42, 43]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_order_mismatch(self):
index = pd.Index(['id1', 'id2'], name='id')
md1 = Metadata(pd.DataFrame([[42, 'foo'], [43, 'bar']], index=index,
columns=['z', 'a']))
md2 = Metadata(pd.DataFrame([['foo', 42], ['bar', 43]], index=index,
columns=['a', 'z']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact.view(Metadata)
md2 = artifact.view(Metadata)
self.assertReallyEqual(md1, md2)
def test_equality_with_missing_data(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertReallyEqual(md1, md2)
class TestToDataframe(unittest.TestCase):
def test_minimal(self):
df = pd.DataFrame({}, index=pd.Index(['id1'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='#SampleID'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.index.name, '#SampleID')
def test_dataframe_copy(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertIsNot(obs, df)
def test_retains_column_order(self):
index = pd.Index(['id1', 'id2'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.columns.tolist(), ['z', 'a', 'ch'])
def test_missing_data(self):
# Different missing data representations should be normalized to np.nan
index = pd.Index(['None', 'nan', 'NA', 'id1'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, float('nan'), 3]),
('NA', [np.nan, 'foo', float('nan'), None]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, np.nan, 3.0]),
('NA', [np.nan, 'foo', np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'NA': object, 'col3': object,
'col4': object})
self.assertTrue(np.isnan(obs['col1']['NA']))
self.assertTrue(np.isnan(obs['NA']['NA']))
self.assertTrue(np.isnan(obs['NA']['id1']))
def test_dtype_int_normalized_to_dtype_float(self):
index = pd.Index(['id1', 'id2', 'id3'], name='id')
df = pd.DataFrame({'col1': [42, -43, 0],
'col2': [42.0, -43.0, 0.0],
'col3': [42, np.nan, 0]},
index=index)
self.assertEqual(df.dtypes.to_dict(),
{'col1': np.int64, 'col2': np.float64,
'col3': np.float64})
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame({'col1': [42.0, -43.0, 0.0],
'col2': [42.0, -43.0, 0.0],
'col3': [42.0, np.nan, 0.0]},
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'col2': np.float64,
'col3': np.float64})
class TestGetColumn(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_column_name_not_found(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
with self.assertRaisesRegex(ValueError,
"'col3'.*not a column.*'col1', 'col2'"):
md.get_column('col3')
def test_artifacts_are_propagated(self):
A = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
md = A.view(Metadata)
obs = md.get_column('b')
exp = CategoricalMetadataColumn(
pd.Series(['3'], name='b', index=pd.Index(['0'], name='id')))
exp._add_artifacts([A])
self.assertEqual(obs, exp)
self.assertEqual(obs.artifacts, (A,))
def test_categorical_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col2')
exp = CategoricalMetadataColumn(
pd.Series(['foo', 'bar'], name='col2',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_numeric_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='#OTU ID'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['a', 'b'], name='#OTU ID')))
self.assertEqual(obs, exp)
self.assertEqual(obs.id_header, '#OTU ID')
class TestGetIDs(unittest.TestCase):
def test_default(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids()
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
def test_incomplete_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "Subject='subject-1' AND SampleType="
with self.assertRaises(ValueError):
metadata.get_ids(where)
where = "Subject="
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_invalid_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "not-a-column-name='subject-1'"
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_empty_result(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
def test_simple_expression(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
where = "Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1', 'S3'}
self.assertEqual(actual, expected)
where = "SampleType='tongue'"
actual = metadata.get_ids(where)
expected = {'S2'}
self.assertEqual(actual, expected)
def test_more_complex_expressions(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1' OR Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND Subject='subject-2'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1'}
self.assertEqual(actual, expected)
def test_query_by_id(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids(where="id='S2' OR id='S1'")
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
def test_query_by_alternate_id_header(self):
metadata = Metadata(pd.DataFrame(
{}, index=pd.Index(['id1', 'id2', 'id3'], name='#OTU ID')))
obs = metadata.get_ids(where="\"#OTU ID\" IN ('id2', 'id3')")
exp = {'id2', 'id3'}
self.assertEqual(obs, exp)
def test_no_columns(self):
metadata = Metadata(
pd.DataFrame({}, index=pd.Index(['a', 'b', 'my-id'], name='id')))
obs = metadata.get_ids()
exp = {'a', 'b', 'my-id'}
self.assertEqual(obs, exp)
def test_query_mixed_column_types(self):
df = pd.DataFrame({'Name': ['Foo', 'Bar', 'Baz', 'Baaz'],
# numbers that would sort incorrectly as strings
'Age': [9, 10, 11, 101],
'Age_Str': ['9', '10', '11', '101'],
'Weight': [80.5, 85.3, np.nan, 120.0]},
index=pd.Index(['S1', 'S2', 'S3', 'S4'], name='id'))
metadata = Metadata(df)
# string pattern matching
obs = metadata.get_ids(where="Name LIKE 'Ba_'")
exp = {'S2', 'S3'}
self.assertEqual(obs, exp)
# string comparison
obs = metadata.get_ids(where="Age_Str >= 11")
exp = {'S1', 'S3'}
self.assertEqual(obs, exp)
# numeric comparison
obs = metadata.get_ids(where="Age >= 11")
exp = {'S3', 'S4'}
self.assertEqual(obs, exp)
# numeric comparison with missing data
obs = metadata.get_ids(where="Weight < 100")
exp = {'S1', 'S2'}
self.assertEqual(obs, exp)
def test_column_with_space_in_name(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'Sample Type': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
metadata.get_ids()
# The list of captured warnings should be empty
self.assertFalse(w)
class TestMerge(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_merging_nothing(self):
md = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
with self.assertRaisesRegex(ValueError,
'At least one Metadata.*nothing to merge'):
md.merge()
def test_merging_two(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_three(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_unaligned_indices(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [9, 8, 7], 'd': [12, 11, 10]},
index=pd.Index(['id3', 'id2', 'id1'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 15, 14], 'f': [16, 18, 17]},
index=pd.Index(['id1', 'id3', 'id2'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_inner_join(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id2', 'X', 'Y'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['X', 'id3', 'id2'], name='id')))
# Single shared ID.
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [2], 'b': [5], 'c': [7], 'd': [10], 'e': [15], 'f': [18]},
index=pd.Index(['id2'], name='id')))
self.assertEqual(obs, exp)
# Multiple shared IDs.
obs = md1.merge(md3)
exp = Metadata(pd.DataFrame(
{'a': [2, 3], 'b': [5, 6], 'e': [15, 14], 'f': [18, 17]},
index=pd.Index(['id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_index_and_column_merge_order(self):
md1 = Metadata(pd.DataFrame(
[[1], [2], [3], [4]],
index=pd.Index(['id1', 'id2', 'id3', 'id4'], name='id'),
columns=['a']))
md2 = Metadata(pd.DataFrame(
[[5], [6], [7]], index=pd.Index(['id4', 'id3', 'id1'], name='id'),
columns=['b']))
md3 = Metadata(pd.DataFrame(
[[8], [9], [10]], index=pd.Index(['id1', 'id4', 'id3'], name='id'),
columns=['c']))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
[[1, 7, 8], [3, 6, 10], [4, 5, 9]],
index=pd.Index(['id1', 'id3', 'id4'], name='id'),
columns=['a', 'b', 'c']))
self.assertEqual(obs, exp)
# Merging in different order produces different ID/column order.
obs = md2.merge(md1, md3)
exp = Metadata(pd.DataFrame(
[[5, 4, 9], [6, 3, 10], [7, 1, 8]],
index=pd.Index(['id4', 'id3', 'id1'], name='id'),
columns=['b', 'a', 'c']))
self.assertEqual(obs, exp)
def test_id_column_only(self):
md1 = Metadata(pd.DataFrame({},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame({},
index=pd.Index(['id2', 'X', 'id1'], name='id')))
md3 = Metadata(pd.DataFrame({},
index=pd.Index(['id1', 'id3', 'id2'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(
pd.DataFrame({}, index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_merged_id_column_name(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2]},
index=pd.Index(['id1', 'id2'], name='sample ID')))
md2 = Metadata(pd.DataFrame(
{'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='feature ID')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_merging_preserves_column_types(self):
# Test that column types remain the same even if a categorical column
# *could* be reinterpreted as numeric after the merge.
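        # After the inner merge only 'id1' and 'id3' remain, so 'c' holds the
        # strings '1' and '3' (which look numeric) and 'd' holds only NaN;
        # both must still be reported as categorical in the assertions below.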
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3],
'b': [np.nan, np.nan, np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': ['1', 'foo', '3'],
'd': np.array([np.nan, np.nan, np.nan], dtype=object)},
index=pd.Index(['id1', 'id4', 'id3'], name='id')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 3], 'b': [np.nan, np.nan], 'c': ['1', '3'],
'd': np.array([np.nan, np.nan], dtype=object)},
index=pd.Index(['id1', 'id3'], name='id')))
self.assertEqual(obs, exp)
self.assertEqual(obs.columns['a'].type, 'numeric')
self.assertEqual(obs.columns['b'].type, 'numeric')
self.assertEqual(obs.columns['c'].type, 'categorical')
self.assertEqual(obs.columns['d'].type, 'categorical')
def test_no_artifacts(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2]}, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
{'b': [3, 4]}, index=pd.Index(['id1', 'id2'], name='id')))
metadata = md1.merge(md2)
self.assertEqual(metadata.artifacts, ())
def test_with_artifacts(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'d': '4'})
md_from_artifact1 = artifact1.view(Metadata)
md_from_artifact2 = artifact2.view(Metadata)
md_no_artifact = Metadata(pd.DataFrame(
{'c': ['3', '42']}, index=pd.Index(['0', '1'], name='id')))
# Merge three metadata objects -- the first has an artifact, the second
# does not, and the third has an artifact.
obs_md = md_from_artifact1.merge(md_no_artifact, md_from_artifact2)
exp_df = pd.DataFrame(
{'a': '1', 'b': '2', 'c': '3', 'd': '4'},
index=pd.Index(['0'], name='id'))
exp_md = Metadata(exp_df)
exp_md._add_artifacts((artifact1, artifact2))
self.assertEqual(obs_md, exp_md)
self.assertEqual(obs_md.artifacts, (artifact1, artifact2))
def test_disjoint_indices(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['X', 'Y', 'Z'], name='id')))
with self.assertRaisesRegex(ValueError, 'no IDs shared'):
md1.merge(md2)
def test_duplicate_columns(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [5, 6], 'b': [7, 8]},
index=pd.Index(['id1', 'id2'], name='id')))
with self.assertRaisesRegex(ValueError, "columns overlap: 'b'"):
md1.merge(md2)
def test_duplicate_columns_self_merge(self):
md = Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='id')))
with self.assertRaisesRegex(ValueError, "columns overlap: 'a', 'b'"):
md.merge(md)
class TestFilterIDs(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_supports_iterable(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids(iter({'a', 'c'}))
exp = Metadata(pd.DataFrame(
{'col1': [1, 3], 'col2': ['foo', 'baz']},
index=pd.Index(['a', 'c'], name='id')))
self.assertEqual(obs, exp)
def test_keep_all(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids({'a', 'b', 'c'})
self.assertEqual(obs, md)
self.assertIsNot(obs, md)
def test_keep_multiple(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids({'a', 'c'})
exp = Metadata(pd.DataFrame(
{'col1': [1, 3], 'col2': ['foo', 'baz']},
index=pd.Index(['a', 'c'], name='id')))
self.assertEqual(obs, exp)
def test_keep_one(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids({'b'})
exp = Metadata(pd.DataFrame(
{'col1': [2], 'col2': ['bar']}, index=pd.Index(['b'], name='id')))
self.assertEqual(obs, exp)
def test_filtering_preserves_column_types(self):
# Test that column types remain the same even if a categorical column
# *could* be reinterpreted as numeric after the filter.
md = Metadata(pd.DataFrame(
{'a': [1, 2, 3],
'b': [np.nan, np.nan, np.nan],
'c': ['1', 'foo', '3'],
'd': np.array([np.nan, np.nan, np.nan], dtype=object)},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md.filter_ids({'id1', 'id3'})
exp = Metadata(pd.DataFrame(
{'a': [1, 3], 'b': [np.nan, np.nan], 'c': ['1', '3'],
'd': np.array([np.nan, np.nan], dtype=object)},
index= | pd.Index(['id1', 'id3'], name='id') | pandas.Index |
import pandas as pd
import pandas.testing as pdt
import pytest
import pytz
from werkzeug.exceptions import RequestEntityTooLarge
from sfa_api.conftest import (
VALID_FORECAST_JSON, VALID_CDF_FORECAST_JSON, demo_forecasts)
from sfa_api.utils import request_handling
from sfa_api.utils.errors import (
BadAPIRequest, StorageAuthError, NotFoundException)
@pytest.mark.parametrize('start,end', [
('invalid', 'invalid'),
('NaT', 'NaT')
])
def test_validate_start_end_fail(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('start,end', [
('20190101T120000Z', '20190101T130000Z'),
('20190101T120000', '20190101T130000'),
('20190101T120000', '20190101T130000Z'),
('20190101T120000Z', '20190101T130000+00:00'),
('20190101T120000Z', '20190101T140000+01:00'),
])
def test_validate_start_end_success(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('query,exc', [
('?start=20200101T0000Z', {'end'}),
('?end=20200101T0000Z', {'start'}),
('?start=20200101T0000Z&end=20210102T0000Z', {'end'}),
('', {'start', 'end'}),
pytest.param('?start=20200101T0000Z&end=20200102T0000Z', {},
marks=pytest.mark.xfail(strict=True))
])
def test_validate_start_end_not_provided(app, forecast_id, query, exc):
url = f'/forecasts/single/{forecast_id}/values{query}'
with app.test_request_context(url):
with pytest.raises(BadAPIRequest) as err:
request_handling.validate_start_end()
if exc:
assert set(err.value.errors.keys()) == exc
@pytest.mark.parametrize('content_type,payload', [
('text/csv', ''),
('application/json', '{}'),
('application/json', '{"values": "nope"}'),
('text/plain', 'nope'),
])
def test_validate_parsable_fail(app, content_type, payload, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(
url, content_type=content_type, data=payload, method='POST',
content_length=len(payload)):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type', [
('text/csv'),
('application/json'),
('application/json'),
])
def test_validate_parsable_fail_too_large(app, content_type, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(RequestEntityTooLarge):
with app.test_request_context(
url, content_type=content_type, method='POST',
content_length=17*1024*1024):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type,payload', [
('text/csv', 'timestamp,value\n2019-01-01T12:00:00Z,5'),
('application/json', ('{"values":[{"timestamp": "2019-01-01T12:00:00Z",'
'"value": 5}]}')),
])
def test_validate_parsable_success(app, content_type, payload, forecast_id):
with app.test_request_context(f'/forecasts/single/{forecast_id}/values/',
content_type=content_type, data=payload,
method='POST'):
request_handling.validate_parsable_values()
def test_validate_observation_values():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
request_handling.validate_observation_values(df)
def test_validate_observation_values_bad_value():
df = pd.DataFrame({'value': [0.1, 's.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_no_value():
df = pd.DataFrame({'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_bad_timestamp():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
def test_validate_observation_values_no_timestamp():
df = pd.DataFrame({
'value': [0.1, '.2'], 'quality_flag': [0.0, 1]})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
@pytest.mark.parametrize('quality', [
[1, .1],
[1, '0.9'],
[2, 0],
['ham', 0]
])
def test_validate_observation_values_bad_quality(quality):
df = pd.DataFrame({'value': [0.1, .2],
'quality_flag': quality,
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
def test_validate_observation_values_no_quality():
df = pd.DataFrame({'value': [0.1, '.2'],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
expected_parsed_df = pd.DataFrame({
'a': [1, 2, 3, 4],
'b': [4, 5, 6, 7],
})
csv_string = "a,b\n1,4\n2,5\n3,6\n4,7\n"
json_string = '{"values":{"a":[1,2,3,4],"b":[4,5,6,7]}}'
def test_parse_csv_success():
test_df = request_handling.parse_csv(csv_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('csv_input', [
'',
"a,b\n1,4\n2.56,2.45\n1,2,3\n"
])
def test_parse_csv_failure(csv_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_csv(csv_input)
def test_parse_json_success():
test_df = request_handling.parse_json(json_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('json_input', [
'',
"{'a':[1,2,3]}"
])
def test_parse_json_failure(json_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_json(json_input)
null_df = pd.DataFrame({
'timestamp': [
'2018-10-29T12:00:00Z',
'2018-10-29T13:00:00Z',
'2018-10-29T14:00:00Z',
'2018-10-29T15:00:00Z',
],
'value': [32.93, 25.17, None, None],
'quality_flag': [0, 0, 1, 0]
})
def test_parse_csv_nan():
test_df = request_handling.parse_csv("""
# comment line
timestamp,value,quality_flag
2018-10-29T12:00:00Z,32.93,0
2018-10-29T13:00:00Z,25.17,0
2018-10-29T14:00:00Z,,1 # this value is NaN
2018-10-29T15:00:00Z,NaN,0
""")
pdt.assert_frame_equal(test_df, null_df)
def test_parse_json_nan():
test_df = request_handling.parse_json("""
{"values":[
{"timestamp": "2018-10-29T12:00:00Z", "value": 32.93, "quality_flag": 0},
{"timestamp": "2018-10-29T13:00:00Z", "value": 25.17, "quality_flag": 0},
{"timestamp": "2018-10-29T14:00:00Z", "value": null, "quality_flag": 1},
{"timestamp": "2018-10-29T15:00:00Z", "value": null, "quality_flag": 0}
]}
""")
pdt.assert_frame_equal(test_df, null_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'text/csv'),
(csv_string, 'application/vnd.ms-excel'),
(json_string, 'application/json')
])
def test_parse_values_success(app, data, mimetype):
with app.test_request_context():
test_df = request_handling.parse_values(data, mimetype)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'application/fail'),
(json_string, 'image/bmp'),
])
def test_parse_values_failure(data, mimetype):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_values(data, mimetype)
@pytest.mark.parametrize('dt_string,expected', [
('20190101T1200Z', pd.Timestamp('20190101T1200Z')),
('20190101T1200', pd.Timestamp('20190101T1200Z')),
('20190101T1200+0700', pd.Timestamp('20190101T0500Z'))
])
def test_parse_to_timestamp(dt_string, expected):
parsed_dt = request_handling.parse_to_timestamp(dt_string)
assert parsed_dt == expected
@pytest.mark.parametrize('dt_string', [
'invalid datetime',
'21454543251345234',
'20190101T2500Z',
'NaT',
])
def test_parse_to_timestamp_error(dt_string):
with pytest.raises(ValueError):
request_handling.parse_to_timestamp(dt_string)
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1150Z')),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0400Z']), 120, None),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0001Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z')),
# out of order
pytest.param(
pd.DatetimeIndex(['2019-09-01T0013Z', '2019-09-01T0006Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z'), marks=pytest.mark.xfail),
(pd.date_range(start='2019-03-10 00:00', end='2019-03-10 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.date_range(start='2019-11-03 00:00', end='2019-11-03 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.DatetimeIndex(['2019-01-01T000132Z']), 33, None),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2018-12-01T000132Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2019-01-02T000132Z'))
])
def test_validate_index_period(index, interval_length, previous_time):
request_handling.validate_index_period(index, interval_length,
previous_time)
def test_validate_index_empty():
with pytest.raises(request_handling.BadAPIRequest):
request_handling.validate_index_period(pd.DatetimeIndex([]), 10,
None)
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0300Z']), 60),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0300Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='20min'), 10),
])
def test_validate_index_period_missing(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'Missing' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0200Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0045Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='5min'), 10),
])
def test_validate_index_period_extra(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'extra' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0201Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0130Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1305Z',
freq='5min'), 10),
])
def test_validate_index_period_other(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) > 0
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1155Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0000Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
| pd.Timestamp('2019-09-01T0000Z') | pandas.Timestamp |
#
# Copyright 2019 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time, timedelta
from itertools import chain
import pandas as pd
from pandas.tseries.holiday import (
Holiday,
nearest_workday,
next_monday,
previous_friday,
sunday_to_monday,
)
from pytz import timezone
from .common_holidays import (
european_labour_day,
new_years_day,
chinese_lunar_new_year_dates,
qingming_festival_dates,
dragon_boat_festival_dates,
mid_autumn_festival_dates,
)
from .trading_calendar import (
TradingCalendar,
HolidayCalendar,
TUESDAY,
THURSDAY,
SATURDAY,
SUNDAY,
)
def before_chinese_new_year_offset(holidays):
"""
    For holidays that fall before Chinese New Year, we subtract one day
    and then roll any weekend dates back to the previous Friday.
"""
return pd.to_datetime(
holidays.map(
lambda d: | previous_friday(d) | pandas.tseries.holiday.previous_friday |
from src.config import file_name_for
from src.config import results_path
from src.config import set_results_folder
from src.constants import GBC, KNN, FEAT_NAMES_IMPORTANCE, FEAT_VALUES_IMPORTANCE, STD_CV, LOO
import pandas as pd
import collections
def save_results(full_results, parameters):
full_results_folder_path = results_path + folder_for(parameters)
set_results_folder(full_results_folder_path)
if parameters['model']['method'] == GBC:
pass
# @TODO: REFACTOR
#save_feature_importance(full_results_description, full_results, FEAT_VALUES_IMPORTANCE)
#save_feature_importance(full_results_description, full_results, FEAT_NAMES_IMPORTANCE)
if parameters['intrpl']:
save_full_and_summary_for(full_results['std'], full_results_folder_path, 'StandardTestTrain_Full.csv', 'StandardTestTrain_Summary.csv', STD_CV)
if parameters['extrpl']:
save_full_and_summary_for(full_results['loo'], full_results_folder_path, 'LeaveOneOut_Full.csv', 'LeaveOneOut_Summary.csv', LOO)
full_results_description = file_name_for(parameters)[:-1] + '.csv'
f = open(full_results_folder_path + 'info.txt', "a")
f.write(full_results_description)
f.close()
def save_full_and_summary_for(dict_results, full_results_path, full_result_filename, summary_filename, validation):
df_results = | pd.DataFrame.from_dict(dict_results, orient='columns') | pandas.DataFrame.from_dict |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4]), Index([1, 3, 5])),
(Index([0.0, 1.0, 2.0]), | Index([1.0, 2.0, 3.0]) | pandas.Index |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pandas as pd
import os
import argparse
from sklearn.externals import joblib
from azureml.core.model import Model
import datetime
# 0.0 Parse input arguments
parser = argparse.ArgumentParser("split")
parser.add_argument("--forecast_horizon", type=int, help="input number of predictions")
parser.add_argument("--starting_date", type=str, help="date to begin forecasting") #change this to tak the last date and start from there
parser.add_argument("--target_column", type=str, help="target colunm to predict on")
parser.add_argument("--timestamp_column", type=str, help="timestamp column from data")
parser.add_argument("--model_type", type=str, help="model type")
parser.add_argument("--date_freq", type=str, help="the step size for predictions, daily, weekly")
args, _ = parser.parse_known_args()
def run(input_data):
# 1.0 Set up results dataframe
results = pd.DataFrame()
# 2.0 Iterate through input data
for idx, csv_file_path in enumerate(input_data):
file_name = os.path.basename(csv_file_path)[:-4]
model_name = args.model_type + '_' + file_name
store_name = file_name.split('_')[0]
brand_name = file_name.split('_')[1]
# 3.0 Set up data to predict on
data = pd.read_csv(csv_file_path, header = 0)
data[args.timestamp_column] = data[args.timestamp_column].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))
date_list = pd.date_range(args.starting_date, periods=args.forecast_horizon, freq=args.date_freq)
prediction_df = | pd.DataFrame() | pandas.DataFrame |
import pandas
import pytest
import modin.pandas as pd
import numpy as np
from .utils import test_data_values, test_data_keys, df_equals
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import os
import time
try:
    from ethnicolr import census_ln, pred_census_ln, pred_wiki_name, pred_fl_reg_name
except ImportError:
    # install on the fly, then retry the import so the names are actually bound
    os.system('pip install ethnicolr')
    from ethnicolr import census_ln, pred_census_ln, pred_wiki_name, pred_fl_reg_name
import seaborn as sns
import matplotlib.pylab as plt
import scipy
from itertools import permutations
import numpy as np
import matplotlib.gridspec as gridspec
from igraph import VertexClustering
from itertools import combinations
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.sans-serif'] = "Palatino"
plt.rcParams['font.serif'] = "Palatino"
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Palatino:italic'
plt.rcParams['mathtext.bf'] = 'Palatino:bold'
plt.rcParams['mathtext.cal'] = 'Palatino'
from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.neural_network import MLPClassifier,MLPRegressor
from sklearn.linear_model import RidgeClassifierCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import RidgeCV
from sklearn.decomposition import PCA
from statsmodels.stats.multitest import multipletests
import multiprocessing
from multiprocessing import Pool
import tqdm
import igraph
from scipy.stats import pearsonr
global paper_df
global main_df
global g
global graphs
global pal
global homedir
global method
global node_2_a
global a_2_node
global a_2_paper
global control
global matrix_idxs
global prs
# matrix_idxs = {'white_M':0,'white_W':1,'white_U':2,'api_M':3,'api_W':4,'api_U':5,'hispanic_M':6,'hispanic_W':7,'hispanic_U':8,'black_M':9,'black_W':10,'black_U':11}
pal = np.array([[72,61,139],[82,139,139],[180,205,205],[205,129,98]])/255.
# global us_only
# us_only = True
"""
AF = author names, with the format LastName, FirstName; LastName, FirstName; etc..
SO = journal
DT = document type (review or article)
CR = reference list
TC = total citations received (at time of downloading about a year ago)
PD = month of publication
PY = year of publication
DI = DOI
"""
import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
return v
parser = argparse.ArgumentParser()
parser.add_argument('-homedir',action='store',dest='homedir',default='/Users/maxwell/Dropbox/Bertolero_Bassett_Projects/citations/')
parser.add_argument('-method',action='store',dest='method',default='wiki')
parser.add_argument('-continent',type=str2bool,action='store',dest='continent',default=False)
parser.add_argument('-continent_only',type=str2bool,action='store',dest='continent_only',default=False)
parser.add_argument('-control',type=str2bool,action='store',dest='control',default=False)
parser.add_argument('-within_poc',type=str2bool,action='store',dest='within_poc',default=False)
parser.add_argument('-walk_length',type=str,action='store',dest='walk_length',default='cited')
parser.add_argument('-walk_papers',type=str2bool,action='store',dest='walk_papers',default=False)
r = parser.parse_args()
locals().update(r.__dict__)
globals().update(r.__dict__)
wiki_2_race = {"Asian,GreaterEastAsian,EastAsian":'api', "Asian,GreaterEastAsian,Japanese":'api',
"Asian,IndianSubContinent":'api', "GreaterAfrican,Africans":'black', "GreaterAfrican,Muslim":'black',
"GreaterEuropean,British":'white', "GreaterEuropean,EastEuropean":'white',
"GreaterEuropean,Jewish":'white', "GreaterEuropean,WestEuropean,French":'white',
"GreaterEuropean,WestEuropean,Germanic":'white', "GreaterEuropean,WestEuropean,Hispanic":'hispanic',
"GreaterEuropean,WestEuropean,Italian":'white', "GreaterEuropean,WestEuropean,Nordic":'white'}
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
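# Illustrative note (an assumption based on how matrix_idxs is used below): the
# 8 categories interleave race and gender, and a per-paper (8, 8) matrix has
# first-author categories on axis 0 and last-author categories on axis 1.
def _example_category_index(race='black', gender='W'):
    # e.g. _example_category_index('black', 'W') -> 7
    return matrix_idxs['%s_%s' % (race, gender)]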
def log_p_value(p):
if p == 0.0:
p = "-log10($\it{p}$)>250"
elif p > 0.001:
p = np.around(p,3)
p = "$\it{p}$=%s"%(p)
else:
p = (-1) * np.log10(p)
p = "-log10($\it{p}$)=%s"%(np.around(p,0).astype(int))
return p
def convert_r_p(r,p):
return "$\it{r}$=%s\n%s"%(np.around(r,2),log_p_value(p))
def nan_pearsonr(x,y):
xmask = np.isnan(x)
ymask = np.isnan(y)
mask = (xmask==False) & (ymask==False)
return pearsonr(x[mask],y[mask])
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
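# Minimal usage sketch: on the sample [1, 2, 3, 4, 5] this returns the mean 3.0
# together with the lower/upper bounds of a 95% t-based confidence interval.
def _example_mean_ci():
    m, lo, hi = mean_confidence_interval([1, 2, 3, 4, 5])
    return m, lo, hi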
def make_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
# if os.path.exists('/%s/data/result_df_%s.csv'%(homedir,method)):
# df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
# return df
main_df = pd.read_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir),header=0)
result_df = pd.DataFrame(columns=['fa_race','la_race','citation_count'])
store_fa_race = []
store_la_race = []
store_citations = []
store_year = []
store_journal = []
store_fa_g = []
store_la_g = []
store_fa_category = []
store_la_category = []
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
store_year.append(entry[1]['PY'])
store_journal.append(entry[1]['SO'])
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
try:store_citations.append(len(entry[1].cited.split(',')))
except:store_citations.append(0)
##wiki
if method =='wiki':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = wiki_2_race[pred_wiki_name(fa_df,'lname','fname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = wiki_2_race[pred_wiki_name(la_df,'lname','fname').race.values[0]]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_fl_reg_name(fa_df,'lname','fname').race.values[0].split('_')[-1]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_fl_reg_name(la_df,'lname','fname').race.values[0].split('_')[-1]
#census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race,la_race= r.race.values
if method =='combined':
##wiki
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = wiki_2_race[pred_wiki_name(fa_df,'fname','lname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = wiki_2_race[pred_wiki_name(la_df,'fname','lname').race.values[0]]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census,la_race_census= r.race.values
if la_race_census != la_race_wiki:
if la_race_wiki == 'white':
la_race = la_race_census
if la_race_census == 'white':
la_race = la_race_wiki
elif (la_race_census != 'white') & (la_race_wiki != 'white'): la_race = la_race_wiki
elif la_race_census == la_race_wiki: la_race = la_race_wiki
if fa_race_census != fa_race_wiki:
if fa_race_wiki == 'white':
fa_race = fa_race_census
if fa_race_census == 'white':
fa_race = fa_race_wiki
elif (fa_race_census != 'white') & (fa_race_wiki != 'white'): fa_race = fa_race_wiki
elif fa_race_census == fa_race_wiki: fa_race = fa_race_wiki
store_la_race.append(la_race)
store_fa_race.append(fa_race)
store_fa_g.append(entry[1].AG[0])
store_la_g.append(entry[1].AG[1])
store_fa_category.append('%s_%s' %(fa_race,entry[1].AG[0]))
store_la_category.append('%s_%s' %(la_race,entry[1].AG[1]))
result_df['fa_race'] = store_fa_race
result_df['la_race'] = store_la_race
result_df['fa_g'] = store_fa_g
result_df['la_g'] = store_la_g
result_df['journal'] = store_journal
result_df['year'] = store_year
result_df['citation_count'] = store_citations
result_df['fa_category'] = store_fa_category
result_df['la_category'] = store_la_category
# result_df.citation_count = result_df.citation_count.values.astype(int)
result_df.to_csv('/%s/data/result_df_%s.csv'%(homedir,method),index=False)
return result_df
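# Condensed restatement (illustrative helper, not called anywhere) of the
# 'combined' rule used in make_df above: when the census and wiki classifiers
# disagree, the non-white label wins; when they agree, either one is returned.
def _example_combine_race_labels(census_label, wiki_label):
    if census_label == wiki_label:
        return wiki_label
    if wiki_label == 'white':
        return census_label
    if census_label == 'white':
        return wiki_label
    # both non-white but different: the wiki label is kept, as in make_df()
    return wiki_label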
def make_pr_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.zeros((main_df.shape[0],8,8))
gender_base = {}
for year in np.unique(main_df.PY.values):
ydf = main_df[main_df.PY==year].AG
fa = np.array([x[0] for x in ydf.values])
la = np.array([x[1] for x in ydf.values])
fa_m = len(fa[fa=='M'])/ len(fa[fa!='U'])
fa_w = len(fa[fa=='W'])/ len(fa[fa!='U'])
		la_m = len(la[la=='M'])/ len(la[la!='U'])
		la_w = len(la[la=='W'])/ len(la[la!='U'])
gender_base[year] = [fa_m,fa_w,la_m,la_w]
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
if method =='wiki_black':
black = [3]
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
fa_g = entry[1].AG[0]
la_g = entry[1].AG[1]
paper_matrix = np.zeros((2,8))
# 1/0
##wiki
if method =='wiki' or method == 'wiki_black':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race = [np.sum(fa_race[white]),np.sum(fa_race[asian]),np.sum(fa_race[hispanic]),np.sum(fa_race[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race = [np.sum(la_race[white]),np.sum(la_race[asian]),np.sum(la_race[hispanic]),np.sum(la_race[black])]
# #census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(fa_df,'lname','fname').values[0][3:]
fa_race = [white,asian,hispanic,black]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(la_df,'lname','fname').values[0][3:]
la_race = [white,asian,hispanic,black]
if method == 'combined':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race_wiki = [np.sum(fa_race_wiki[white]),np.sum(fa_race_wiki[asian]),np.sum(fa_race_wiki[hispanic]),np.sum(fa_race_wiki[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race_wiki = [np.sum(la_race_wiki[white]),np.sum(la_race_wiki[asian]),np.sum(la_race_wiki[hispanic]),np.sum(la_race_wiki[black])]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race_census = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if fa_race_census[0] < fa_race_wiki[0]: fa_race = fa_race_census
else: fa_race = fa_race_wiki
if la_race_census[0] < la_race_wiki[0]: la_race = la_race_census
else: la_race = la_race_wiki
gender_b = gender_base[year]
if fa_g == 'M': paper_matrix[0] = np.outer([1,0],fa_race).flatten()
if fa_g == 'W': paper_matrix[0] = np.outer([0,1],fa_race).flatten()
if fa_g == 'U': paper_matrix[0] = np.outer([gender_b[0],gender_b[1]],fa_race).flatten()
if la_g == 'M': paper_matrix[1] = np.outer([1,0],la_race).flatten()
if la_g == 'W': paper_matrix[1] = np.outer([0,1],la_race).flatten()
if la_g == 'U': paper_matrix[1] = np.outer([gender_b[2],gender_b[3]],la_race).flatten()
paper_matrix = np.outer(paper_matrix[0],paper_matrix[1])
paper_matrix = paper_matrix / np.sum(paper_matrix)
prs[entry[0]] = paper_matrix
np.save('/%s/data/result_pr_df_%s.npy'%(homedir,method),prs)
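# Minimal numerical sketch of the per-paper matrix built above: the first- and
# last-author (gender x race) probability vectors are combined with an outer
# product and renormalised so the 8x8 matrix sums to 1. The numbers are made up.
def _example_paper_matrix():
    import numpy as np
    fa = np.outer([1, 0], [0.7, 0.1, 0.1, 0.1]).flatten()          # known man, mostly 'white'
    la = np.outer([0.4, 0.6], [0.25, 0.25, 0.25, 0.25]).flatten()  # unknown gender, flat race
    m = np.outer(fa, la)
    return m / m.sum()   # shape (8, 8), sums to 1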
def make_all_author_race():
"""
this makes the actual data by pulling the race from the census or wiki data,
	but this version includes middle authors, which we use for the co-authorship networks
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append(a)
df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
df = df.drop_duplicates('name')
if method =='florida':
# 1/0
r = pred_fl_reg_name(df,'lname','fname')
		r = r.rename(columns={'nh_black':'black','nh_white':'white'})
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
if method =='census':
r = pred_census_ln(df,'lname')
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
r = dict(zip(df.name.values,df.race.values))
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
a_lname,a_fname = a.split(', ')
races.append(r[a_lname.strip()])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
race2wiki = {'api': ["Asian,GreaterEastAsian,EastAsian","Asian,GreaterEastAsian,Japanese", "Asian,IndianSubContinent"],
'black':["GreaterAfrican,Africans", "GreaterAfrican,Muslim"],
'white':["GreaterEuropean,British", "GreaterEuropean,EastEuropean", "GreaterEuropean,Jewish", "GreaterEuropean,WestEuropean,French",
"GreaterEuropean,WestEuropean,Germanic", "GreaterEuropean,WestEuropean,Nordic", "GreaterEuropean,WestEuropean,Italian"],
'hispanic':["GreaterEuropean,WestEuropean,Hispanic"]}
if method =='wiki':
r = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r[race] = 0.0
for e in race2wiki[race]:
r[race] = r[race] + r[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r = r.drop(columns=[e])
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
races.append(r[r.name==a].race.values[0])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
if method =='combined':
r_wiki = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r_wiki[race] = 0.0
for e in race2wiki[race]:
r_wiki[race] = r_wiki[race] + r_wiki[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r_wiki = r_wiki.drop(columns=[e])
r_census = pred_census_ln(df,'lname')
census = r_census.white < r_wiki.white
wiki = r_census.white > r_wiki.white
r = r_census.copy()
r[census] = r_census
r[wiki] = r_wiki
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
def figure_1_pr_authors():
df = pd.read_csv('/%s/data/result_df_%s_all.csv'%(homedir,method))
paper_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
results = []
for year in np.unique(paper_df.PY.values):
print (year)
ydf = paper_df[paper_df.PY==year]
names = []
for p in ydf.iterrows():
for n in p[1].AF.split(';'):
names.append(n.strip())
names = np.unique(names)
result = np.zeros((len(names),4))
for idx,name in enumerate(names):
try:result[idx] = df[df.name==name].values[0][-4:]
except:result[idx] = np.nan
results.append(np.nansum(result,axis=0))
results = np.array(results)
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 14, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:7])
ax1_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(results.transpose()[[3,0,2,1]],axis=0), labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'black')
plt.margins(0,0)
plt.ylabel('sum of predicted author race')
plt.xlabel('publication year')
ax1.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
# 1/0
ax2 = fig.add_subplot(gs[:15,8:])
ax2_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(np.divide(results.transpose()[[3,0,2,1]],np.sum(results,axis=1)),axis=0)*100, labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0),alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'white')
plt.margins(0,0)
plt.ylabel('percentage of predicted author race',labelpad=-5)
plt.xlabel('publication year')
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('b',{'fontweight':'bold'},'left',pad=2)
plt.savefig('authors.pdf')
def figure_1_pr():
n_iters = 1000
df =pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0).rename({'PY':'year','SO':'journal'},axis='columns')
matrix = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
results = np.zeros((len(np.unique(df.year)),4))
if within_poc == False:
labels = ['white author & white author','white author & author of color','author of color & white author','author of color &\nauthor of color']
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-poc','poc-white','poc-poc']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
if within_poc == True:
names = ['white author','Asian author','Hispanic author','Black author']
groups = [[0,4],[1,5],[2,6],[3,7]]
labels = names
plot_matrix = np.zeros((matrix.shape[0],len(groups)))
for i in range(4):
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,groups[i],:],axis=-1),axis=-1)
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,:,groups[i]],axis=-1),axis=-1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 16, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:5])
plt.sca(ax1)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels),colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=9,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'w')
plt.margins(0,0)
plt.ylabel('percentage of publications')
plt.xlabel('publication year')
ax1.tick_params(axis='x', which='major', pad=-1)
ax1.tick_params(axis='y', which='major', pad=0)
i,j,k,l = np.flip(results[0]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
# i,j,k,l = np.array([100]) - np.array([i,j,k,l])
plt.sca(ax1)
ax1.yaxis.set_major_formatter(ticker.PercentFormatter())
ax1.set_yticks([i,j,k,l])
ax1.set_yticklabels(np.flip(np.around(results[0]*100,0).astype(int)))
ax2 = ax1_plot[0].axes.twinx()
plt.sca(ax2)
i,j,k,l = np.flip(results[-1]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
plt.ylim(0,100)
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.set_yticks([i,j,k,l])
ax2.set_yticklabels(np.flip(np.around(results[-1]*100,0)).astype(int))
plt.xticks([1995., 2000., 2005., 2010., 2015., 2019],np.array([1995., 2000., 2005., 2010., 2015., 2019]).astype(int))
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
plot_df = pd.DataFrame(columns=['year','percentage','iteration'])
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([r,year,i]).reshape(1,-1),columns=['percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
a = plot_df[(plot_df.iteration==i)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = mean_confidence_interval(pct_df.percentage)
ci = np.around(ci,2)
print ("Across 1000 bootstraps, the mean percent increase per year was %s%% (95 CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]))
plt.text(.5,.48,"Increasing at %s%% per year\n(95%% CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]),{'fontsize':8,'color':'white'},horizontalalignment='center',verticalalignment='bottom',rotation=9,transform=ax2.transAxes)
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,6:10]))
jidx=jidx+3
for aidx,journal in enumerate(np.unique(df.journal)):
ax = axes[aidx]
plt.sca(ax)
if aidx == 2: ax.set_ylabel('percentage of publications')
if aidx == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
results = np.zeros(( len(np.unique(df[(df.journal==journal)].year)),4))
for yidx,year in enumerate(np.unique(df[(df.journal==journal)].year)):
papers = df[(df.year==year)&(df.journal==journal)].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
data = df[df.journal==journal]
if journal == 'NATURE NEUROSCIENCE':
for i in range(3): results = np.concatenate([[[0,0,0,0]],results],axis=0)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels,axis=0),colors=np.flip(pal,axis=0), alpha=1)
plt.margins(0,0)
ax.set_yticks([])
if aidx != 4:
ax.set_xticks([])
else: plt.xticks(np.array([1996.5,2017.5]),np.array([1995.,2019]).astype(int))
plt.title(journal.title(), pad=-10,color='w',fontsize=8)
if aidx == 0: plt.text(0,1,'b',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
journals = np.unique(df.journal)
plot_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
for j in journals:
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)&(df.journal==j)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([j,r,year,i]).reshape(1,-1),columns=['journal','percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
for j in journals:
a = plot_df[(plot_df.iteration==i)&(plot_df.journal==j)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['journal','year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.journal = j
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = pct_df.groupby(['journal']).percentage.agg(mean_confidence_interval).values
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,11:]))
jidx=jidx+3
for i,ax,journal,color in zip(range(5),axes,journals,sns.color_palette("rocket_r", 5)):
plt.sca(ax)
ax.clear()
#
# plot_df[np.isnan(plot_df.percentage)] = 0.0
if i == 0: plt.text(0,1,'c',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
lp = sns.lineplot(data=plot_df[plot_df.journal==journal],y='percentage',x='year',color=color,ci='sd')
plt.margins(0,0)
thisdf = plot_df[plot_df.journal==journal]
minp = int(np.around(thisdf.mean()['percentage'],0))
thisdf = thisdf[thisdf.year==thisdf.year.max()]
maxp = int(np.around(thisdf.mean()['percentage'],0))
plt.text(-0.01,.5,'%s'%(minp),horizontalalignment='right',verticalalignment='top', transform=ax.transAxes,fontsize=10)
plt.text(1.01,.9,'%s'%(maxp),horizontalalignment='left',verticalalignment='top', transform=ax.transAxes,fontsize=10)
ax.set_yticks([])
# ax.set_xticks([])
ax.set_ylabel('')
plt.margins(0,0)
ax.set_yticks([])
if i == 2:
ax.set_ylabel('percentage of publications',labelpad=12)
if i != 4: ax.set_xticks([])
else: plt.xticks(np.array([1.5,22.5]),np.array([1995.,2019]).astype(int))
mean_pc,min_pc,max_pc = np.around(ci[i],2)
if i == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
else: ax.set_xlabel('')
plt.text(.99,0,'95%' + "CI: %s<%s<%s"%(min_pc,mean_pc,max_pc),horizontalalignment='right',verticalalignment='bottom', transform=ax.transAxes,fontsize=8)
if journal == 'NATURE NEUROSCIENCE':
plt.xlim(-3,21)
plt.savefig('/%s/figures/figure1_pr_%s_%s.pdf'%(homedir,method,within_poc))
def validate():
black_names = pd.read_csv('%s/data/Black scientists - Faculty.csv'%(homedir))['Name'].values[1:]
fnames = []
lnames = []
all_names =[]
for n in black_names:
try:
fn,la = n.split(' ')[:2]
fnames.append(fn.strip())
lnames.append(la.strip())
all_names.append('%s_%s'%(fn.strip(),la.strip()))
except:continue
black_df = pd.DataFrame(np.array([all_names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append('%s_%s'%(a_fname,a_lname))
main_df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = main_df.drop_duplicates('name')
if method == 'wiki':
black_r = pred_wiki_name(black_df,'lname','fname')
all_r = pred_wiki_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.as_matrix()[:,4:][:,black].sum(axis=1)
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.as_matrix()[:,4:][:,black].sum(axis=1)
black_df['sample'] = 'Black-in-STEM'
if method == 'florida':
black_r = pred_fl_reg_name(black_df,'lname','fname')
all_r = pred_fl_reg_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-2]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-2]
black_df['sample'] = 'Black-in-STEM'
if method == 'census':
black_r = pred_census_ln(black_df,'lname')
all_r = pred_census_ln(main_df,'lname')
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-3]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-3]
black_df['sample'] = 'Black-in-STEM'
data = all_df.append(black_df,ignore_index=True)
data.probability = data.probability.astype(float)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_%s.pdf'%(method))
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data[data['sample']=='papers'],x='probability',stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data[data['sample']=='Black-in-STEM'],x='probability',hue="sample",stat='density',common_norm=False,bins=20)
# plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_2.pdf')
def make_pr_control():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s.npy'%(homedir,method),ridge_probabilities)
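# Illustrative sketch (not part of the original pipeline): how the ridge predictions above
# become per-paper probability matrices. The helper name and toy input are hypothetical;
# the renormalize-and-reshape logic mirrors the two lines before the np.save call in
# make_pr_control.
def _example_renormalize_predictions(predictions):
	"""predictions: array of shape (n_papers, 64) of raw ridge outputs."""
	import numpy as np
	probs = predictions / predictions.sum(axis=1).reshape(-1,1) #each paper's 64 values sum to 1
	return probs.reshape(predictions.shape[0],8,8) #view as first-author x last-author categories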
def make_pr_control_jn():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
	# 6) paper location
	# 7) paper sub-field
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
df = df.rename(columns={'DI':'doi'})
df['category'] = 'none'
sub = pd.read_csv('/%s/article_data/JoNcategories_no2019.csv'%(homedir))
for cat,doi in zip(sub.category,sub.doi):
df.iloc[np.where(df.doi==doi)[0],-1] = cat
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location','category'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
cat = entry[1].category
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc,cat]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s_jn.npy'%(homedir,method),ridge_probabilities)
def write_matrix():
main_df = pd.read_csv('/%s/data/ArticleDataNew.csv'%(homedir))
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
small_matrix = np.zeros((2,2))
matrix_idxs = {'white':0,'api':1,'hispanic':2,'black':3}
small_idxs = {'white':0,'api':1,'hispanic':1,'black':1}
for fa_r in ['white','api','hispanic','black']:
for la_r in ['white','api','hispanic','black']:
small_matrix[small_idxs[fa_r],small_idxs[la_r]] += np.sum(prs[:,matrix_idxs[fa_r],matrix_idxs[la_r]],axis=0)
np.save('/Users/maxwell/Documents/GitHub/unbiasedciter/expected_matrix_%s.npy'%(method),np.sum(prs,axis=0))
	np.save('/Users/maxwell/Documents/GitHub/unbiasedciter/expected_small_matrix_%s.npy'%(method),small_matrix)
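# Illustrative sketch (not part of the original pipeline): the collapse performed in
# write_matrix, pooling every non-white category into a single group so the full
# race-by-race matrix becomes a 2x2 (white vs. everyone else). The helper name and
# toy input are hypothetical.
def _example_collapse_to_small_matrix(full_matrix):
	"""full_matrix: square array indexed by racial category (white, api, hispanic, black)."""
	import numpy as np
	matrix_idxs = {'white':0,'api':1,'hispanic':2,'black':3}
	small_idxs = {'white':0,'api':1,'hispanic':1,'black':1}
	small = np.zeros((2,2))
	for fa_r in matrix_idxs:
		for la_r in matrix_idxs:
			small[small_idxs[fa_r],small_idxs[la_r]] += full_matrix[matrix_idxs[fa_r],matrix_idxs[la_r]]
	return small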
def convert_df():
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
df['cited'] = np.nan
for idx,paper in tqdm.tqdm(df.iterrows(),total=df.shape[0]):
self_cites = np.array(paper.SA.split(',')).astype(int)
		try: cites = np.array(paper.CP.split(',')).astype(int)
		except AttributeError: #CP is NaN when a paper has no recorded citations, so skip it
			continue
cites = cites[np.isin(cites,self_cites) == False]
df.iloc[idx,-1] = ', '.join(cites.astype(str))
df.to_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir))
def make_pr_percentages(control):
print (control)
df = pd.read_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir),header=0)
citing_prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
if control == 'True_jn' or control == 'null_jn':
base_prs = np.load('/%s/data/probabilities_pr_%s_jn.npy'%(homedir,method))
if control == True: base_prs = np.load('/%s/data/probabilities_pr_%s.npy'%(homedir,method))
if control == 'null_True': base_prs = np.load('/%s/data/probabilities_pr_%s.npy'%(homedir,method))
if control == 'null_walk' or control == 'walk':
if walk_length == 'cited':
base_prs = np.load('/%s/data/walk_pr_probabilities_%s_cited.npy'%(homedir,method)).reshape(-1,8,8)
if walk_length[:3] == 'all':
base_prs = np.load('/%s/data/walk_pr_probabilities_%s_%s.npy'%(homedir,method,walk_length)).reshape(-1,8,8)
if type(control) != bool and control[:4] == 'null':
matrix = np.zeros((100,df.shape[0],8,8))
matrix[:] = np.nan
base_matrix = np.zeros((100,df.shape[0],8,8))
base_matrix[:] = np.nan
else:
matrix = np.zeros((df.shape[0],8,8))
matrix[:] = np.nan
base_matrix = np.zeros((df.shape[0],8,8))
base_matrix[:] = np.nan
if control == False:
year_df = pd.DataFrame(columns=['year','month','prs'])
citable_df = pd.DataFrame(columns=['year','month','index'])
for year in df.PY.unique():
if year < 2009:continue
for month in df.PD.unique():
rdf = df[(df.year<year) | ((df.year==year) & (df.PD<=month))]
this_base_matrix = citing_prs[rdf.index.values].mean(axis=0)
year_df = year_df.append(pd.DataFrame(np.array([year,month,this_base_matrix]).reshape(1,-1),columns=['year','month','prs']),ignore_index=True)
citable_df = citable_df.append(pd.DataFrame(np.array([year,month,rdf.index.values]).reshape(1,-1),columns=['year','month','index']),ignore_index=True)
if type(control) != bool and control[5:] == 'False':
year_df = pd.DataFrame(columns=['year','month','prs'])
citable_df = pd.DataFrame(columns=['year','month','index'])
for year in df.PY.unique():
if year < 2009:continue
for month in df.PD.unique():
rdf = df[(df.year<year) | ((df.year==year) & (df.PD<=month))]
this_base_matrix = citing_prs[rdf.index.values].mean(axis=0)
year_df = year_df.append(pd.DataFrame(np.array([year,month,this_base_matrix]).reshape(1,-1),columns=['year','month','prs']),ignore_index=True)
citable_df = citable_df.append(pd.DataFrame(np.array([year,month,rdf.index.values]).reshape(1,-1),columns=['year','month','index']),ignore_index=True)
for idx,paper in tqdm.tqdm(df.iterrows(),total=df.shape[0]):
#only look at papers published 2009 or later
year = paper.year
if year < 2009:continue
#only look at papers that cite at least 10 papers in our data
if type(paper.cited) != str:
if np.isnan(paper.cited)==True: continue
n_cites = len(paper['cited'].split(','))
if n_cites < 10: continue
if control == 'null_True' or control == 'null_jn':
for i in range(100):
this_base_matrix = []
this_matrix = []
for p in base_prs[np.array(paper['cited'].split(',')).astype(int)-1]: #for each cited paper
if np.min(p) < 0:p = p + abs(np.min(p))
p = p + abs(np.min(p))
p = p.flatten()/p.sum()
this_base_matrix.append(p.reshape((8,8))) #use model prs as base matrix
choice = np.zeros((8,8))
choice[np.unravel_index(np.random.choice(range(64),p=p),(8,8))] = 1 #and randomly assign race category as citation matrix
this_matrix.append(choice)
this_base_matrix = np.sum(this_base_matrix,axis=0)
this_matrix = np.sum(this_matrix,axis=0)
matrix[i,idx] = this_matrix
base_matrix[i,idx] = this_base_matrix
elif control == 'null_False':
citable = citable_df[(citable_df['year']==year)&(citable_df.month==paper.PD)]['index'].values[0]
for i in range(100):
this_base_matrix = []
this_matrix = []
for p in citing_prs[np.random.choice(citable,n_cites,False)]: #for each cited paper #for naive sampling random papers
if np.min(p) < 0:p = p + abs(np.min(p))
p = p + abs(np.min(p))
p = p.flatten()/p.sum()
this_base_matrix.append(p.reshape((8,8))) #use naive base rate as base matrix
choice = np.zeros((8,8))
choice[np.unravel_index(np.random.choice(range(64),p=p),(8,8))] = 1 #and randomly assign race category as citation matrix based on base rates
this_matrix.append(choice)
this_base_matrix = np.sum(this_base_matrix,axis=0)
this_matrix = np.sum(this_matrix,axis=0)
matrix[i,idx] = this_matrix
base_matrix[i,idx] = this_base_matrix
elif control == 'null_walk':
for i in range(100):
this_base_matrix = []
this_matrix = []
for p in base_prs[np.array(paper['cited'].split(',')).astype(int)-1]: #for each cited paper
choice = np.zeros((8,8))
if np.isnan(p).any():
this_base_matrix.append(p.reshape((8,8))) #use model prs as base matrix
choice[:] = np.nan
this_matrix.append(choice)
continue
if np.min(p) < 0:p = p + abs(np.min(p))
p = p + abs(np.min(p))
p = p.flatten()/p.sum()
this_base_matrix.append(p.reshape((8,8))) #use model prs as base matrix
choice[np.unravel_index(np.random.choice(range(64),p=p),(8,8))] = 1 #and randomly assign race category as citation matrix
this_matrix.append(choice)
this_base_matrix = np.nansum(this_base_matrix,axis=0)
this_matrix = np.nansum(this_matrix,axis=0)
matrix[i,idx] = this_matrix
base_matrix[i,idx] = this_base_matrix
else:
this_matrix = citing_prs[np.array(paper['cited'].split(',')).astype(int)-1].sum(axis=0)
if control == False:
				this_base_matrix = year_df[(year_df.year==year) & (year_df.month==paper.PD)]['prs'].values[0] * n_cites #base rate for this paper's publication month
if control == True:
this_base_matrix = base_prs[np.array(paper['cited'].split(',')).astype(int)-1].sum(axis=0)
if control == 'True_jn':
this_base_matrix = base_prs[np.array(paper['cited'].split(',')).astype(int)-1].sum(axis=0)
if control == 'walk':
this_base_matrix = np.nansum(base_prs[np.array(paper['cited'].split(',')).astype(int)-1],axis=0)
matrix[idx] = this_matrix
base_matrix[idx] = this_base_matrix
if type(control) == bool or control == 'True_jn':
np.save('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control),matrix)
np.save('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control),base_matrix)
elif control =='null_True' or control =='null_False' or control == 'null_jn':
np.save('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control),matrix)
np.save('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control),base_matrix)
else:
np.save('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length),matrix)
np.save('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length),base_matrix)
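# Illustrative sketch (not part of the original pipeline): the core of the null models in
# make_pr_percentages, which assign each cited paper a single race-pair category drawn
# from its 8x8 probability matrix. The helper name is hypothetical; the shift,
# normalization, and unravel_index sampling mirror the loops above.
def _example_null_draw(p):
	"""p: 8x8 array of (possibly unnormalized, possibly negative) probabilities."""
	import numpy as np
	if np.min(p) < 0: p = p + abs(np.min(p)) #shift so no entry is negative
	p = p.flatten()/p.sum() #normalize to a proper probability vector over the 64 cells
	choice = np.zeros((8,8))
	choice[np.unravel_index(np.random.choice(range(64),p=p),(8,8))] = 1 #one-hot race-pair draw
	return choice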
def self_citing(method):
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
df['self_cites'] = np.zeros((df.shape[0]))
for idx,paper in tqdm.tqdm(df.iterrows(),total=df.shape[0]):
#only look at papers published 2009 or later
year = paper.year
if year < 2009: continue
df.iloc[idx,-1] = len(paper.SA.split(','))
	#compare self-citation counts for white-white teams vs. teams with at least one author of color
	scipy.stats.ttest_ind(df[(df.fa_race=='white')&(df.la_race=='white')].self_cites,df[(df.fa_race!='white')|(df.la_race!='white')].self_cites)
	np.median(df[(df.fa_race=='white')&(df.la_race=='white')].self_cites.values)
	np.median(df[(df.fa_race!='white')|(df.la_race!='white')].self_cites.values)
	np.mean(df[(df.fa_race=='white')&(df.la_race=='white')].self_cites.values)
	np.mean(df[(df.fa_race!='white')|(df.la_race!='white')].self_cites.values)
def plot_pr_intersections(control,citing):
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method.split('_')[0]))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
n_iters = 1000
if type(control) == bool:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
elif control == 'all':
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,False))
base_matrix = []
for control_type in [True,False]: base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control_type)))
base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'walk','cited')))
base_matrix[0] = base_matrix[0] / np.nansum(base_matrix[0])
base_matrix[1] = base_matrix[1] / np.nansum(base_matrix[1])
base_matrix[2] = base_matrix[2] / np.nansum(base_matrix[2])
base_matrix = np.mean(base_matrix,axis=0)
else:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
# np.save('/Users/maxwell/Documents/GitHub/unbiasedciter/expected_matrix_%s.npy'%(method),np.mean(matrix,axis=0))
if type(control) == bool:
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',control))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',control))[0]
elif control == 'all':
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',False))
null_base = []
null_base.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',True))[0])
		null_base.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',False))[0])
null_base.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s_%s.npy'%(homedir,method,'null','walk','cited'))[0])
null_base = np.mean(null_base,axis=0)
else:
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s_%s.npy'%(homedir,method,'null',control,walk_length))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s_%s.npy'%(homedir,method,'null',control,walk_length))[0]
boot_matrix = np.zeros((n_iters,8,8))
boot_r_matrix = np.zeros((n_iters,8,8))
ww_indices = df[(df.year>=2009)&(df.fa_race=='white')&(df.la_race=='white')].index
wa_indices = df[(df.year>=2009)&(df.fa_race=='white')&(df.la_race!='white')].index
aw_indices = df[(df.year>=2009)&(df.fa_race!='white')&(df.la_race=='white')].index
aa_indices = df[(df.year>=2009)&(df.fa_race!='white')&(df.la_race!='white')].index
black_indices = df[(df.year>=2009)&((df.fa_race=='black')|(df.la_race=='black'))].index
white_indices = df[(df.year>=2009)&((df.fa_race=='white')|(df.la_race=='white'))].index
hispanic_indices = df[(df.year>=2009)&((df.fa_race=='hispanic')|(df.la_race=='hispanic'))].index
api_indices = df[(df.year>=2009)&((df.fa_race=='api')|(df.la_race=='api'))].index
for b in range(n_iters):
if citing == 'all':
papers = np.random.choice(range(matrix.shape[0]),matrix.shape[0],replace=True)
if citing == 'ww':
papers = np.random.choice(ww_indices,ww_indices.shape[0],replace=True)
if citing == 'wa':
papers = np.random.choice(wa_indices,wa_indices.shape[0],replace=True)
if citing == 'aw':
papers = np.random.choice(aw_indices,aw_indices.shape[0],replace=True)
if citing == 'aa':
papers = np.random.choice(aa_indices,aa_indices.shape[0],replace=True)
if citing == 'black':
papers = np.random.choice(black_indices,black_indices.shape[0],replace=True)
if citing == 'hispanic':
papers = np.random.choice(hispanic_indices,hispanic_indices.shape[0],replace=True)
if citing == 'api':
papers = np.random.choice(api_indices,api_indices.shape[0],replace=True)
if citing == 'white':
papers = np.random.choice(white_indices,white_indices.shape[0],replace=True)
m = np.nansum(matrix[papers],axis=0)
m = m / np.sum(m)
e = np.nansum(base_matrix[papers],axis=0)
e = e / np.sum(e)
r = np.nansum(null[np.random.choice(100,1),papers],axis=0)
r = r / np.sum(r)
er = np.nansum(null_base[papers],axis=0)
er = er / np.sum(er)
rate = (m - e) / e
r_rate = (r - er) / er
boot_matrix[b] = rate
boot_r_matrix[b] = r_rate
# np.save('/%s/data/intersection_boot_matrix_%s.npy'%(homedir),boot_matrix,method)
p_matrix = np.zeros((8,8))
for i,j in combinations(range(8),2):
x = boot_matrix[:,i,j]
y = boot_r_matrix[:,i,j]
ay = abs(y)
ax = abs(x.mean())
p_matrix[i,j] = len(ay[ay>ax])
p_matrix = p_matrix / n_iters
multi_mask = multipletests(p_matrix.flatten(),0.05,'holm')[0].reshape(8,8)
names = ['white(m)','Asian(m)','Hispanic(m)','Black(m)','white(w)','Asian(w)','Hispanic(w)','Black(w)']
matrix_idxs = {'white(m)':0,'api(m)':1,'hispanic(m)':2,'black(m)':3,'white(w)':4,'api(w)':5,'hispanic(w)':6,'black(w)':7}
men_aoc = np.vectorize(matrix_idxs.get)(['api(m)','hispanic(m)','black(m)'])
women_aoc = np.vectorize(matrix_idxs.get)(['api(w)','hispanic(w)','black(w)'])
men_aoc = boot_matrix[:,men_aoc][:,:,men_aoc].flatten()
women_aoc = boot_matrix[:,women_aoc][:,:,women_aoc].flatten()
white_men = np.vectorize(matrix_idxs.get)(['white(m)'])
white_women = np.vectorize(matrix_idxs.get)(['white(w)'])
white_men= boot_matrix[:,white_men][:,:,white_men].flatten()
white_women = boot_matrix[:,white_women][:,:,white_women].flatten()
# def exact_mc_perm_test(xs, ys, nmc=10000):
# n, k = len(xs), 0
# diff = np.abs(np.mean(xs) - np.mean(ys))
# zs = np.concatenate([xs, ys])
# for j in range(nmc):
# np.random.shuffle(zs)
# k += diff <= np.abs(np.mean(zs[:n]) - np.mean(zs[n:]))
# return k / nmc
# p = exact_mc_perm_test(men_aoc,women_aoc)
# p = log_p_value(p)
def direction(d):
if d<=0: return 'less'
else: return 'greater'
diff = (men_aoc-women_aoc)
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
print (control)
if control == 'walk': print (walk_length)
	print ('AoC men papers are cited at %s percentage points %s than women AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_men-men_aoc[:len(white_men)])
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
if control == 'walk': print (walk_length)
	print ('white men papers are cited at %s percentage points %s than men AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_men-white_women)
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
	print ('white men papers are cited at %s percentage points %s than white women papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_women-women_aoc[:len(white_women)])
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
	print ('white women papers are cited at %s percentage points %s than women AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_women-men_aoc[:len(white_women)])
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
if control == 'walk': print (walk_length)
	print ('white women papers are cited at %s percentage points %s than men AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
# 1/0
if type(control) == bool:
orig_matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
orig_base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
elif control == 'all':
orig_matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,False))
orig_base_matrix = []
for control_type in [True,False]:
orig_base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control_type)))
orig_base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'walk','cited')))
orig_base_matrix[0] = orig_base_matrix[0] / np.nansum(orig_base_matrix[0])
orig_base_matrix[1] = orig_base_matrix[1] / np.nansum(orig_base_matrix[1])
orig_base_matrix[2] = orig_base_matrix[2] / np.nansum(orig_base_matrix[2])
orig_base_matrix = np.mean(orig_base_matrix,axis=0)
else:
orig_matrix = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
orig_base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
df = pd.DataFrame(columns=['bias type','bias amount','boot','race'])
for race in ['white','black','api','hispanic']:
for idx in range(n_iters):
#norm matrix
if citing == 'all':
pick = np.random.choice(np.arange(orig_matrix.shape[0]),int(orig_matrix.shape[0]),replace=True)
# papers = np.random.choice(range(matrix.shape[0]),matrix.shape[0],replace=True)
if citing == 'ww':
pick = np.random.choice(ww_indices,ww_indices.shape[0],replace=True)
if citing == 'wa':
pick = np.random.choice(wa_indices,wa_indices.shape[0],replace=True)
if citing == 'aw':
pick = np.random.choice(aw_indices,aw_indices.shape[0],replace=True)
if citing == 'aa':
pick = np.random.choice(aa_indices,aa_indices.shape[0],replace=True)
if citing == 'black':
pick = np.random.choice(black_indices,black_indices.shape[0],replace=True)
if citing == 'hispanic':
pick = np.random.choice(hispanic_indices,hispanic_indices.shape[0],replace=True)
if citing == 'api':
pick = np.random.choice(api_indices,api_indices.shape[0],replace=True)
if citing == 'white':
pick = np.random.choice(white_indices,white_indices.shape[0],replace=True)
matrix = orig_matrix[pick]
matrix = matrix / np.nansum(matrix)
base_matrix= orig_base_matrix[pick]
base_matrix = base_matrix / np.nansum(base_matrix)
man_e1 = np.nansum(matrix[:,matrix_idxs['%s_M'%(race)],matrix_idxs['%s_M'%(race)]])
man_b1 = np.nansum(base_matrix[:,matrix_idxs['%s_M'%(race)],matrix_idxs['%s_M'%(race)]])
woman_e1 = np.nansum(matrix[:,matrix_idxs['%s_W'%(race)],matrix_idxs['%s_W'%(race)]])
woman_b1 = np.nansum(base_matrix[:,matrix_idxs['%s_W'%(race)],matrix_idxs['%s_W'%(race)]])
x = ((man_e1 - man_b1)/ man_b1) - ((woman_e1 - woman_b1)/ woman_b1) # bias against women within this race
if race == 'black':
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
np.vectorize(matrix_idxs.get)(['black_M','black_W'])]
if race == 'api':
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
np.vectorize(matrix_idxs.get)(['api_M','api_W'])]
if race == 'hispanic':
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
np.vectorize(matrix_idxs.get)(['hispanic_M','hispanic_W'])]
if race == 'white':
groups = [np.vectorize(matrix_idxs.get)(['hispanic_M','hispanic_W','api_M','api_W','black_M','black_W']),
np.vectorize(matrix_idxs.get)(['white_M','white_W'])]
race_e1 = np.nansum(matrix[:,groups[1],groups[1]])
race_b1 = np.nansum(base_matrix[:,groups[1],groups[1]])
other_e1 = np.nansum(matrix[:,groups[0],groups[0]])
other_b1 = np.nansum(base_matrix[:,groups[0],groups[0]])
other = (other_e1 - other_b1) / other_b1
race_c = (race_e1 - race_b1) / race_b1
y = other - race_c # bias against this race
df = df.append(pd.DataFrame(np.array(['gender',x,idx,race]).reshape(1,4),columns=['bias type','bias amount','boot','race']),ignore_index=True)
df = df.append(pd.DataFrame(np.array(['race',y,idx,race]).reshape(1,4),columns=['bias type','bias amount','boot','race']),ignore_index=True)
df['bias amount'] = df['bias amount'].astype(float) * 100
	df.loc[df.race == 'hispanic','race'] = 'Hispanic'
	df.loc[df.race == 'api','race'] = 'Asian'
	df.loc[df.race == 'black','race'] = 'Black'
plt.close()
sns.set(style='white',font='Palatino')
cmap = sns.diverging_palette(220, 10, as_cmap=True)
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(2, 2, figure=fig)
ax1 = fig.add_subplot(gs[:2,:1])
ax2 = fig.add_subplot(gs[:2,1:])
plt.sca(ax1)
d = np.around(np.nanmean(boot_matrix,axis=0)*100,0)
# d[multi_mask==False] = np.nan
heat = sns.heatmap(d,annot=True,fmt='g',cmap=cmap,vmin=-25,vmax=25,annot_kws={"size": 8})
heat.set_ylabel('first author',labelpad=0)
heat.set_yticklabels(names,rotation=25)
heat.set_xlabel('last author',labelpad=0)
heat.set_xticklabels(names,rotation=65)
heat.set_title('a',{'fontweight':'bold'},'left',pad=1)
for text, show_annot in zip(ax1.texts, (element for row in multi_mask for element in row)):
text.set_visible(show_annot)
cbar = heat.collections[0].colorbar
cbar.ax.set_yticklabels(["{:.0%}".format(i/100) for i in cbar.get_ticks()])
plt.sca(ax2)
df['bias amount'] = df['bias amount'].astype(float)*-1
sns.barplot(data=df,y='bias amount',x='race',hue='bias type',ci='sd',palette=['grey','white'],order=['white','Asian','Hispanic','Black'],**{'edgecolor':'grey'})
plt.ylabel('percentage point difference')
ax2.yaxis.set_major_locator(plt.MaxNLocator(8))
ax2.tick_params(axis='y', which='major', pad=-5)
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax2.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
plt.legend(ncol=2,fontsize='small',frameon=False,columnspacing=0.5,handletextpad=0)
plt.title('b',{'fontweight':'bold'},'left',pad=1)
if type(control) == bool: plt.savefig('/%s/figures/intersection/intersection_%s_%s_%s.pdf'%(homedir,method,control,citing))
else: plt.savefig('/%s/figures/intersection/intersection_%s_%s_%s_%s.pdf'%(homedir,method,control,walk_length,citing))
plt.close()
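# Illustrative sketch (not part of the original pipeline): the quantity bootstrapped in
# plot_pr_intersections, the relative over-/under-citation rate (observed - expected) / expected,
# with a percentile interval over resampled citing papers. The helper name and the default
# iteration count are hypothetical.
def _example_over_under_citation(observed, expected, n_boot=1000):
	"""observed, expected: arrays of shape (n_papers, 8, 8) of citation counts and expectations."""
	import numpy as np
	rates = np.zeros((n_boot,8,8))
	for b in range(n_boot):
		papers = np.random.choice(observed.shape[0],observed.shape[0],replace=True)
		m = np.nansum(observed[papers],axis=0); m = m / np.sum(m)
		e = np.nansum(expected[papers],axis=0); e = e / np.sum(e)
		rates[b] = (m - e) / e
	return np.percentile(rates,2.5,axis=0), np.percentile(rates,97.5,axis=0) #95% bootstrap interval per cell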
def all_inter():
for c in ['aa','aw','wa','ww']:
plot_pr_intersections(control,c)
def all_inter_main():
global walk_length
plot_pr_intersections(False,'all')
plot_pr_intersections(True,'all')
walk_length = 'cited'
plot_pr_intersections('walk','all')
walk_length = 'all'
plot_pr_intersections('walk','all')
def plot_ethnicolor_confusion():
# order = []
# for r in wiki_2_race.keys():
# order.append(r.split(',')[-1])
# r = [[873, 44, 7, 6, 6, 114, 8, 10, 7, 1, 8, 9, 6],
# [17, 1300, 7, 20, 2, 58, 7, 6, 2, 0, 36, 10, 2],
# [10, 10, 1188, 23, 107, 121, 21, 22, 15, 9, 17, 22, 7],
# [5, 18, 48, 321, 72, 126, 12, 32, 31, 6, 37, 21, 5],
# [6, 3, 118, 36, 824, 80, 45, 64, 23, 6, 15, 16, 12],
# [52, 11, 57, 45, 52, 7341, 45, 260, 161, 39, 59, 101, 66],
# [8, 5, 16, 14, 19, 84, 1262, 122, 21, 44, 18, 30, 23],
# [7, 8, 27, 20, 66, 633, 119, 881, 59, 71, 80, 45, 32],
# [13, 7, 14, 32, 34, 488, 37, 112, 1417, 41, 125, 118, 21],
# [3, 0, 5, 7, 5, 167, 19, 98, 36, 318, 26, 23, 67],
# [12, 12, 16, 19, 16, 174, 23, 56, 64, 18, 1437, 213, 22],
# [4, 10, 13, 25, 8, 165, 34, 39, 99, 24, 147, 1790, 16],
# [10, 2, 3, 7, 13, 141, 30, 31, 18, 44, 13, 11, 640]]
name_dict = {'asian':[0,1,2],'black':[3,4],'white':[5,6,7,8,9,11,12],'hispanic':[10]}
names = ['asian','black','hispanic','white']
# small_r = np.zeros((4,4))
# r = np.array(r)
# for idx,i in enumerate(names):
# for jdx,j in enumerate(names):
# small_r[idx,jdx] = r[name_dict[i],:][:,name_dict[j]].sum()
small_r = [[2214,363,257,1693],[82 , 13522 , 427 , 4409],[ 144 , 408 , 12410 , 15624],[ 438 , 3511 , 3804, 138256]]
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# ax1 = fig.add_subplot(gs[:,0])
# ax2 = fig.add_subplot(gs[:,1])
plt.sca(ax1)
order = ['Asian','Black','Hispanic','white']
# asian, hispanic, black, white
# small_r[[0,2,1,3]]
heat = sns.heatmap(np.array(small_r)[[0,2,1,3]][:,[0,2,1,3]],vmax=20000,annot=True,fmt='g',annot_kws={"size": 10},cbar_kws={'shrink': .5},square=True)
locs, labels = plt.yticks()
plt.yticks(locs,order,rotation=360,**{'fontsize':10},)
locs, labels = plt.xticks()
plt.xticks(locs,order,rotation=90,**{'fontsize':10})
plt.ylabel('observed racial category',**{'fontsize':12})
plt.xlabel('predicted racial category',**{'fontsize':12})
plt.title('a',{'fontweight':'bold'},'left',pad=3)
plt.tight_layout()
plt.sca(ax2)
r = [[5743, 42, 796, 3490],[257, 1693, 218, 22649],[173,82,25118,7609],[694,1157, 2442, 27837]]
order = ['Asian','Black','Hispanic','white']
heat = sns.heatmap(np.array(r),vmax=20000,annot=True,fmt='g',annot_kws={"size": 10},cbar_kws={'shrink': .5},square=True)
locs, labels = plt.yticks()
plt.yticks(locs,order,rotation=360,**{'fontsize':10},)
locs, labels = plt.xticks()
plt.xticks(locs,order,rotation=90,**{'fontsize':10})
plt.ylabel('observed racial category',**{'fontsize':12})
plt.xlabel('predicted racial category',**{'fontsize':12})
plt.title('b',{'fontweight':'bold'},'left',pad=3)
plt.tight_layout()
plt.savefig('/%s/dazed_and_confused.pdf'%(homedir))
plt.savefig('/%s/dazed_and_confused.png'%(homedir))
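# Illustrative sketch (not part of the original pipeline): how a 13x13 wiki-category
# confusion matrix can be collapsed to the 4x4 matrix plotted in plot_ethnicolor_confusion;
# the logic follows the commented-out block in that function. The helper name and toy
# input are hypothetical.
def _example_collapse_confusion(r):
	"""r: 13x13 confusion matrix over the wiki race/ethnicity categories."""
	import numpy as np
	name_dict = {'asian':[0,1,2],'black':[3,4],'white':[5,6,7,8,9,11,12],'hispanic':[10]}
	names = ['asian','black','hispanic','white']
	r = np.array(r)
	small_r = np.zeros((4,4))
	for idx,i in enumerate(names):
		for jdx,j in enumerate(names):
			small_r[idx,jdx] = r[name_dict[i],:][:,name_dict[j]].sum()
	return small_r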
def plot_histy():
	"""
	Figure 2
	"""
	#func_vars is expected to exist at module scope (the sibling plot_pr_percentages_* functions take it as an argument)
	control,within_poc,walk_papers = func_vars[0],func_vars[1],func_vars[2]
n_iters = 10000
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
if control == False:
null = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_False'))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_False'))
if control == True:
null = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_True'))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_True'))
if control == 'walk':
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null_walk',walk_length))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null_walk',walk_length))
if type(control) == bool:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
else:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
if walk_papers == True:
walk_base_matrix = np.load('/%s/data/base_citation_matrix_%s_walk.npy'%(homedir,method))
matrix[np.isnan(walk_base_matrix[:,0,0])] = np.nan
base_matrix[np.isnan(walk_base_matrix[:,0,0])] = np.nan
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
if within_poc == False:
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-poc','poc-white','poc-poc']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_null = np.zeros((null.shape[0],matrix.shape[0],len(groups),len(groups)))
plot_null_base = np.zeros((null.shape[0],matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
plot_base_matrix[:,i,j] = np.nansum(base_matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for iteration in range(null.shape[0]):
plot_null[iteration,:,i,j] = np.nansum(null[iteration,:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
plot_null_base[iteration,:,i,j] = np.nansum(null_base[iteration,:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
	#make sure that, if we don't have data for a paper, we also are not including its base rates
	#this is mostly for when the random walk fails because the paper is not part of the graph.
x = plot_matrix.sum(axis=1).sum(axis=1)
y = plot_base_matrix.sum(axis=1).sum(axis=1)
mask = np.where(x==0)[0]
assert y[mask].sum() == 0
data_type = np.zeros((4)).astype(str)
data_type[:] = 'real'
rdata_type = np.zeros((4)).astype(str)
rdata_type[:] = 'random'
data = []
papers = df[df.year>=2009]
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = plot_matrix[boot_papers].reshape(-1,4)[:,3]
total = plot_matrix[boot_papers].reshape(-1,4).sum(axis=1)
# emperical = emperical / total
data.append(emperical)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(12, 10, figure=fig)
ax1 = fig.add_subplot(gs[:12,:5])
plt.sca(ax1)
	dp = sns.histplot(np.nanmean(data,axis=0))
# plt.xlim(0,.2)
plt.xlim(0.5,2)
ax2 = fig.add_subplot(gs[0:6,5:])
ax3 = fig.add_subplot(gs[6:,5:])
data = []
papers = df[(df.year>=2009)&(df.fa_race=='white')&(df.la_race=='white')]
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = plot_matrix[boot_papers].reshape(-1,4)[:,3]
total = plot_matrix[boot_papers].reshape(-1,4).sum(axis=1)
# emperical = emperical / total
data.append(emperical)
plt.sca(ax2)
	dp = sns.histplot(np.nanmean(data,axis=0))
# plt.xlim(0,.2)
plt.xlim(0.5,2)
data = []
papers = df[(df.year>=2009)&((df.fa_race!='white')| (df.la_race!='white'))]
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = plot_matrix[boot_papers].reshape(-1,4)[:,3]
total = plot_matrix[boot_papers].reshape(-1,4).sum(axis=1)
# emperical = emperical / total
data.append(emperical)
plt.sca(ax3)
	dp = sns.histplot(np.nanmean(data,axis=0))
# plt.xlim(0,.2)
plt.xlim(0.5,2)
def plot_compare():
names = ['white-white','white-AoC','AoC-white','AoC-AoC']
jn_df = pd.read_csv('/%s/%s_%s_2compare.csv'%(homedir,'True_jn',True))
jn_df['model'] = 'subfield'
df = pd.read_csv('/%s/%s_%s_2compare.csv'%(homedir,True,True))
df['model'] = 'paper'
df = df.append(jn_df,ignore_index=True)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3.5),constrained_layout=True)
gs = gridspec.GridSpec(2,2, figure=fig)
ax0 = fig.add_subplot(gs[:,0])
ax1 = fig.add_subplot(gs[0,1])
ax2 = fig.add_subplot(gs[1,1])
axes = [ax0,ax1,ax2]
for ax,citation_type in zip(axes,['all','white','AoC']):
if citation_type == 'all': plot_df = df[df['citing authors'] == 'all']
if citation_type == 'white': plot_df = df[df['citing authors'] == 'white']
if citation_type == 'AoC': plot_df = df[df['citing authors'] == 'AoC']
plt.sca(ax)
bx = sns.violinplot(data=plot_df,y='citation_rate',x='citation_type',hue='model',split=True,palette=pal,order=names,saturation=1,cut=0,scale='width')
for index,violin in enumerate([bx.collections[:3],bx.collections[3:6],bx.collections[6:9],bx.collections[9:]]):
i,j,k = violin
i.set_color(pal[index])
j.set_color(pal[index])
k.set_color('white')
# plt.ylabel("percent over-/under-citation",labelpad=0)
plt.xlabel('')
plt.ylabel('')
plt.title('%s'%(citation_type),{'fontweight':'bold'},'left',pad=0)
ax.yaxis.set_major_locator(plt.MaxNLocator(8))
ax.tick_params(axis='y', which='major', pad=-5)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
mean = plot_df.groupby('citation_type',sort=False).mean()['citation_rate']
std = plot_df.groupby('citation_type',sort=False).std()['citation_rate']
maxval = plot_df.groupby('citation_type',sort=False).max()['citation_rate']
minval = plot_df.groupby('citation_type',sort=False).min()['citation_rate']
for i,citation_type in enumerate([ 'white-white','white-AoC','AoC-white','AoC-AoC']):
y = plot_df[(plot_df.model=='paper')&(plot_df.citation_type==citation_type)].citation_rate.values
x = plot_df[(plot_df.model=='subfield')&(plot_df.citation_type==citation_type)].citation_rate.values
diff=x-y
ci_mean = np.around(np.mean(diff),2)
ci = [np.around(np.percentile(diff,2.5),2),np.around(np.percentile(diff, 97.5),2)]
m,s = mean.values[i],std.values[i]
if m > 0: loc = minval.values[i] - s
if m < 0: loc = maxval.values[i] + s
if loc > plt.ylim()[1]:
loc = plt.ylim()[1]
if loc < plt.ylim()[0]:
loc = plt.ylim()[0]
ax.text(i,loc,'%s<%s>%s'%(ci[0],ci_mean,ci[1]),horizontalalignment='center',fontsize=8)
ax.legend_.remove()
plt.savefig('/%s/figures/percentages/jneuro_papers.pdf'%(homedir))
def plot_pr_percentages_booty_matrix_jn(func_vars):
	"""
	Figure 2
	"""
	control,within_poc,walk_papers = func_vars[0],func_vars[1],func_vars[2]
n_iters = 100
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
df = df.rename(columns={'DI':'doi'})
df['category'] = 'none'
sub = pd.read_csv('/%s/article_data/JoNcategories_no2019.csv'%(homedir))
for cat,doi in zip(sub.category,sub.doi):
df.iloc[np.where(df.doi==doi)[0],-1] = cat
if control == False:
null = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_False'))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_False'))
if control == True:
null = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_True'))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_True'))
if control == 'walk':
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null_walk',walk_length))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null_walk',walk_length))
if type(control) == bool:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
else:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
if walk_papers == True:
walk_base_matrix = np.load('/%s/data/base_citation_matrix_%s_walk.npy'%(homedir,method))
matrix[np.isnan(walk_base_matrix[:,0,0])] = np.nan
base_matrix[np.isnan(walk_base_matrix[:,0,0])] = np.nan
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
if within_poc == False:
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-AoC','AoC-white','AoC-AoC']
if within_poc == 'black':
# groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W','api_M','api_W','hispanic_M','hispanic_W',]),
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
np.vectorize(matrix_idxs.get)(['black_M','black_W',])]
names = ['white-white','white-black','black-white','black-black']
if within_poc == 'api':
# groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W','hispanic_M','hispanic_W','black_M','black_W',]),
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
np.vectorize(matrix_idxs.get)(['api_M','api_W',])]
names = ['white-white','white-asian','asian-white','asian-asian']
if within_poc == 'hispanic':
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
# groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W','api_M','api_W','black_M','black_W',]),
np.vectorize(matrix_idxs.get)(['hispanic_M','hispanic_W',])]
names = ['white-white','white-hispanic','hispanic-white','hispanic-hispanic']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_null = np.zeros((null.shape[0],matrix.shape[0],len(groups),len(groups)))
plot_null_base = np.zeros((null.shape[0],matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
plot_base_matrix[:,i,j] = np.nansum(base_matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for iteration in range(null.shape[0]):
plot_null[iteration,:,i,j] = np.nansum(null[iteration,:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
plot_null_base[iteration,:,i,j] = np.nansum(null_base[iteration,:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
	#make sure that, if we don't have data for a paper, we also are not including its base rates
	#this is mostly for when the random walk fails because the paper is not part of the graph.
x = plot_matrix.sum(axis=1).sum(axis=1)
y = plot_base_matrix.sum(axis=1).sum(axis=1)
mask = np.where(x==0)[0]
assert y[mask].sum() == 0
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,7.5),constrained_layout=True)
gs = gridspec.GridSpec(4,2, figure=fig)
ax0 = fig.add_subplot(gs[0,0])
ax1 = fig.add_subplot(gs[1,0])
ax2 = fig.add_subplot(gs[2,0])
ax3 = fig.add_subplot(gs[3,0])
ax4 = fig.add_subplot(gs[0,1])
ax5 = fig.add_subplot(gs[1,1])
ax6 = fig.add_subplot(gs[2,1])
ax7 = fig.add_subplot(gs[3,1])
axes = [ax0,ax1,ax2,ax3,ax4,ax5,ax6,ax7]
categories = ['all','behavioral/cognitive','behavioral/systems/cognitive', 'brief communications', 'cellular/molecular', 'development/plasticity/repair','neurobiology of disease', 'systems/circuits']
for thisax,cat in zip(axes,categories):
plt.sca(thisax)
data_type = np.zeros((4)).astype(str)
data_type[:] = 'real'
rdata_type = np.zeros((4)).astype(str)
rdata_type[:] = 'random'
data = pd.DataFrame(columns=['citation_rate','citation_type','data_type'])
if cat == 'all': papers = df[(df.journal=='JOURNAL OF NEUROSCIENCE')&(df.year>=2009)]
else: papers = df[(df.category==cat)&(df.year>=2009)]
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = np.nanmean(plot_matrix[boot_papers],axis=0)
expected = np.nanmean(plot_base_matrix[boot_papers],axis=0)
emperical = emperical / np.sum(emperical)
expected = expected / np.sum(expected)
rate = (emperical - expected) / expected
random = np.nanmean(plot_null[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
e_random = np.nanmean(plot_null_base[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
random = random / np.sum(random)
e_random = e_random / np.sum(e_random)
r_rate = (random - e_random) / e_random
data = data.append(pd.DataFrame(data= np.array([rate.flatten(),names,data_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data = data.append(pd.DataFrame(data= np.array([r_rate.flatten(),names,rdata_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data.citation_rate = (data.citation_rate.astype(float)*100)
p_vals = np.zeros((4))
for idx,name in enumerate(names):
x = data[(data.data_type=='real')&(data.citation_type==name)].citation_rate.values
y = data[(data.data_type=='random')&(data.citation_type==name)].citation_rate.values
ay = abs(y)
ax = abs(x.mean())
p_vals[idx] = len(ay[ay>ax])
p_vals = p_vals / n_iters
if type(control) == bool:
data.to_csv('/%s/data/citaion_rates_%s_%s.csv'%(homedir,method,control),index=False)
if control == 'walk': data.to_csv('/%s/data/citaion_rates_%s_%s_%s.csv'%(homedir,method,control,walk_length),index=False)
plot_data = data[data.data_type=='real']
mean = plot_data.groupby('citation_type',sort=False).mean()
std = plot_data.groupby('citation_type',sort=False).std()
bx = sns.violinplot(data=plot_data,y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width')
for i,v in enumerate(bx.collections[::2]):
v.set_color(pal[i])
bx2 = sns.violinplot(data=data[data.data_type=='random'],y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width',inner=None)
for i,v in enumerate(bx2.collections[8:]):
v.set_color([pal[i][0],pal[i][1],pal[i][2],.35])
plt.ylabel(" ",labelpad=0)
plt.xlabel('')
plt.title('%s,n=%s'%(cat,len(papers)),{'fontweight':'bold'},'left',pad=1)
thisax.yaxis.set_major_locator(plt.MaxNLocator(8))
thisax.tick_params(axis='y', which='major', pad=-5)
thisax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
thisax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
for i in range(4):
m,s = mean.values[i],std.values[i]
loc = m + (s*3)
low = np.around(m - (s*2),1)[0]
high = np.around(m + (s*2),1)[0]
m = np.around(m,1)[0]
if m > 0: loc = loc * -1
if loc > plt.ylim()[1]:
loc = plt.ylim()[1]
if loc < plt.ylim()[0]:
loc = plt.ylim()[0]
thisax.text(i,loc,'%s<%s>%s\n%s'%(low,m,high,log_p_value(p_vals[i])),horizontalalignment='center',fontsize=8)
plt.savefig('sub_fields.pdf')
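# Illustrative sketch (not part of the original pipeline): the p-values annotated on the
# violin plots above are the fraction of null bootstrap draws whose absolute citation rate
# exceeds the absolute mean of the observed rates. The helper name is hypothetical.
def _example_null_p_value(real_rates, null_rates):
	"""real_rates, null_rates: 1-d arrays of bootstrapped citation rates for one citation type."""
	import numpy as np
	return np.mean(np.abs(null_rates) > np.abs(np.mean(real_rates)))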
def plot_pr_percentages_booty_matrix(func_vars):
	"""
	Figure 2
	"""
	control,within_poc,jneuro_papers = func_vars[0],func_vars[1],func_vars[2]
n_iters = 100
t_n_iters = 10
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
if control == False:
null = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_False'))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_False'))
if control == True:
null = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_True'))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_True'))
if control == 'True_jn':
null = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_jn'))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,'null_jn'))
if control == 'walk':
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null_walk',walk_length))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null_walk',walk_length))
if type(control) == bool or control == 'True_jn':
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
else:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
# if jneuro_papers == True:
# walk_base_matrix = np.load('/%s/data/base_citation_matrix_%s_walk.npy'%(homedir,method))
# matrix[np.isnan(walk_base_matrix[:,0,0])] = np.nan
# base_matrix[np.isnan(walk_base_matrix[:,0,0])] = np.nan
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
if within_poc == False:
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-AoC','AoC-white','AoC-AoC']
if within_poc == 'black':
# groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W','api_M','api_W','hispanic_M','hispanic_W',]),
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
np.vectorize(matrix_idxs.get)(['black_M','black_W',])]
names = ['white-white','white-black','black-white','black-black']
if within_poc == 'api':
# groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W','hispanic_M','hispanic_W','black_M','black_W',]),
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
np.vectorize(matrix_idxs.get)(['api_M','api_W',])]
names = ['white-white','white-asian','asian-white','asian-asian']
if within_poc == 'hispanic':
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W']),
# groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W','api_M','api_W','black_M','black_W',]),
np.vectorize(matrix_idxs.get)(['hispanic_M','hispanic_W',])]
names = ['white-white','white-hispanic','hispanic-white','hispanic-hispanic']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_null = np.zeros((null.shape[0],matrix.shape[0],len(groups),len(groups)))
plot_null_base = np.zeros((null.shape[0],matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
plot_base_matrix[:,i,j] = np.nansum(base_matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for iteration in range(null.shape[0]):
plot_null[iteration,:,i,j] = np.nansum(null[iteration,:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
plot_null_base[iteration,:,i,j] = np.nansum(null_base[iteration,:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
	#make sure that, if we don't have data for a paper, we also are not including its base rates
	#this is mostly for when the random walk fails because the paper is not part of the graph.
x = plot_matrix.sum(axis=1).sum(axis=1)
y = plot_base_matrix.sum(axis=1).sum(axis=1)
mask = np.where(x==0)[0]
assert y[mask].sum() == 0
for papers in [df[df.year>=2009],df[(df.year>=2009)&(df.fa_race=='white')&(df.la_race=='white')],df[(df.year>=2009)&((df.fa_race!='white')|(df.la_race!='white'))]]:
print (papers.citation_count.sum())
sum_cites = papers.citation_count.sum()
papers = papers.index
emperical = np.nanmean(plot_matrix[papers],axis=0)
expected = np.nanmean(plot_base_matrix[papers],axis=0)
emperical = emperical / np.sum(emperical)
expected = expected / np.sum(expected)
rate = (emperical - expected) / expected
p = np.array([np.around(emperical.flatten()*100,1),np.around(expected.flatten()*100,1)]).flatten()
print ('Of the citations given between 2009 and 2019, WW papers received %s, compared to %s for WA papers, %s for AW papers, and %s for AA papers. The expected proportions based on the pool of citable papers were %s for WW, %s for WA, %s for AW, and %s for AA.'%(p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7]))
p = np.around(rate.flatten()*100,1)
print ('By this measure, WW papers were cited %s more than expected, WA papers were cited %s less than expected, AW papers were cited %s less than expected, and AA papers were cited %s less than expected.'%(p[0],p[1],p[2],p[3]))
p = np.around(((emperical - expected) * sum_cites).flatten(),-1).astype(int)
print ('These values correspond to WW papers being cited roughly %s more times than expected, compared to roughly %s more times for WA papers, %s fewer for AW papers, and %s fewer for AA papers'%(p[0],p[1],p[2],p[3]))
data_type = np.zeros((4)).astype(str)
data_type[:] = 'real'
rdata_type = np.zeros((4)).astype(str)
rdata_type[:] = 'random'
data = pd.DataFrame(columns=['citation_rate','citation_type','data_type'])
papers = df[df.year>=2009]
if jneuro_papers == True: papers = papers[papers.journal=='JOURNAL OF NEUROSCIENCE']
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = np.nanmean(plot_matrix[boot_papers],axis=0)
expected = np.nanmean(plot_base_matrix[boot_papers],axis=0)
emperical = emperical / np.sum(emperical)
expected = expected / np.sum(expected)
rate = (emperical - expected) / expected
random = np.nanmean(plot_null[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
e_random = np.nanmean(plot_null_base[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
random = random / np.sum(random)
e_random = e_random / np.sum(e_random)
r_rate = (random - e_random) / e_random
data = data.append(pd.DataFrame(data= np.array([rate.flatten(),names,data_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data = data.append(pd.DataFrame(data= np.array([r_rate.flatten(),names,rdata_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data.citation_rate = (data.citation_rate.astype(float)*100)
p_vals = np.zeros((4))
for idx,name in enumerate(names):
x = data[(data.data_type=='real')&(data.citation_type==name)].citation_rate.values
y = data[(data.data_type=='random')&(data.citation_type==name)].citation_rate.values
ay = abs(y)
ax = abs(x.mean())
p_vals[idx] = len(ay[ay>ax])
p_vals = p_vals / n_iters
if type(control) == bool:
data.to_csv('/%s/data/citaion_rates_%s_%s.csv'%(homedir,method,control),index=False)
if control == 'walk': data.to_csv('/%s/data/citaion_rates_%s_%s_%s.csv'%(homedir,method,control,walk_length),index=False)
plot_data = data[data.data_type=='real']
mean = plot_data.groupby('citation_type',sort=False).mean()
std = plot_data.groupby('citation_type',sort=False).std()
all_data = plot_data.copy()
all_data['citing authors'] = 'all'
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(12, 10, figure=fig)
ax1 = fig.add_subplot(gs[:12,:5])
plt.sca(ax1)
bx = sns.violinplot(data=plot_data,y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width')
for i,v in enumerate(bx.collections[::2]):
v.set_color(pal[i])
bx2 = sns.violinplot(data=data[data.data_type=='random'],y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width',inner=None)
for i,v in enumerate(bx2.collections[8:]):
v.set_color([pal[i][0],pal[i][1],pal[i][2],.35])
plt.ylabel("percent over-/under-citation",labelpad=0)
plt.xlabel('')
plt.title('a, all citers',{'fontweight':'bold'},'left',pad=1)
ax1.yaxis.set_major_locator(plt.MaxNLocator(8))
ax1.tick_params(axis='y', which='major', pad=-5)
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax1.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
for i in range(4):
m,s = mean.values[i],std.values[i]
loc = m + (s*3)
low = np.around(m - (s*2),1)[0]
high = np.around(m + (s*2),1)[0]
m = np.around(m,1)[0]
if m > 0: loc = loc * -1
if loc > plt.ylim()[1]:
loc = plt.ylim()[1]
if loc < plt.ylim()[0]:
loc = plt.ylim()[0]
ax1.text(i,loc,'%s<%s>%s\n%s'%(low,m,high,log_p_value(p_vals[i])),horizontalalignment='center',fontsize=8)
ax2 = fig.add_subplot(gs[0:6,5:])
ax3 = fig.add_subplot(gs[6:,5:])
plt.sca(ax2)
data = pd.DataFrame(columns=['citation_rate','citation_type','data_type'])
papers = df[(df.year>=2009)&(df.fa_race=='white')&(df.la_race=='white')]
if jneuro_papers == True: papers = papers[papers.journal=='JOURNAL OF NEUROSCIENCE']
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = np.nanmean(plot_matrix[boot_papers],axis=0)
expected = np.nanmean(plot_base_matrix[boot_papers],axis=0)
emperical = emperical / np.sum(emperical)
expected = expected / np.sum(expected)
rate = (emperical - expected) / expected
random = np.nanmean(plot_null[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
e_random = np.nanmean(plot_null_base[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
random = random / np.sum(random)
e_random = e_random / np.sum(e_random)
r_rate = (random - e_random) / e_random
data = data.append(pd.DataFrame(data= np.array([rate.flatten(),names,data_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data = data.append(pd.DataFrame(data= np.array([r_rate.flatten(),names,rdata_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data.citation_rate = (data.citation_rate.astype(float)*100)
p_vals = np.zeros((4))
for idx,name in enumerate(names):
x = data[(data.data_type=='real')&(data.citation_type==name)].citation_rate.values
y = data[(data.data_type=='random')&(data.citation_type==name)].citation_rate.values
ay = abs(y)
ax = abs(x.mean())
p_vals[idx] = len(ay[ay>ax])
p_vals = p_vals / n_iters
	plot_data = data[data.data_type=='real'].copy()
	plot_data['citing authors'] = 'white'
all_data = all_data.append(plot_data,ignore_index=True)
mean = plot_data.groupby('citation_type',sort=False).mean()
std = plot_data.groupby('citation_type',sort=False).std()
plt.sca(ax2)
bx = sns.violinplot(data=plot_data,y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width')
for i,v in enumerate(bx.collections[::2]):
v.set_color(pal[i])
bx2 = sns.violinplot(data=data[data.data_type=='random'],y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width',inner=None)
for i,v in enumerate(bx2.collections[8:]):
v.set_color([pal[i][0],pal[i][1],pal[i][2],.35])
# plt.ylabel("percent over-/under-citation",labelpad=0)
plt.xlabel('')
plt.ylabel('')
plt.title('b, white citers',{'fontweight':'bold'},'left',pad=1)
ax2.yaxis.set_major_locator(plt.MaxNLocator(8))
ax2.tick_params(axis='y', which='major', pad=-5)
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax2.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
for i in range(4):
m,s = mean.values[i],std.values[i]
loc = m + (s*3)
low = np.around(m - (s*2),1)[0]
high = np.around(m + (s*2),1)[0]
m = np.around(m,1)[0]
if m > 0: loc = loc * -1
if loc > plt.ylim()[1]:
loc = plt.ylim()[1]
if loc < plt.ylim()[0]:
loc = plt.ylim()[0]
ax2.text(i,loc,'%s<%s>%s\n%s'%(low,m,high,log_p_value(p_vals[i])),horizontalalignment='center',fontsize=8)
plt.sca(ax3)
data = pd.DataFrame(columns=['citation_rate','citation_type','data_type'])
papers = df[(df.year>=2009)&((df.fa_race!='white')|(df.la_race!='white'))]
if jneuro_papers == True: papers = papers[papers.journal=='JOURNAL OF NEUROSCIENCE']
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = np.nanmean(plot_matrix[boot_papers],axis=0)
expected = np.nanmean(plot_base_matrix[boot_papers],axis=0)
emperical = emperical / np.sum(emperical)
expected = expected / np.sum(expected)
rate = (emperical - expected) / expected
random = np.nanmean(plot_null[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
e_random = np.nanmean(plot_null_base[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
random = random / np.sum(random)
e_random = e_random / np.sum(e_random)
r_rate = (random - e_random) / e_random
data = data.append(pd.DataFrame(data= np.array([rate.flatten(),names,data_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data = data.append(pd.DataFrame(data= np.array([r_rate.flatten(),names,rdata_type]).swapaxes(0,1),columns=['citation_rate','citation_type','data_type']),ignore_index=True)
data.citation_rate = (data.citation_rate.astype(float)*100)
p_vals = np.zeros((4))
for idx,name in enumerate(names):
x = data[(data.data_type=='real')&(data.citation_type==name)].citation_rate.values
y = data[(data.data_type=='random')&(data.citation_type==name)].citation_rate.values
ay = abs(y)
ax = abs(x.mean())
p_vals[idx] = len(ay[ay>ax])
p_vals = p_vals / n_iters
plot_data = data[data.data_type=='real']
plot_data['citing authors'] = 'AoC'
all_data = all_data.append(plot_data,ignore_index=True)
if method == 'florida':all_data.to_csv('/%s/%s_%s_2compare.csv'%(homedir,control,jneuro_papers))
mean = plot_data.groupby('citation_type',sort=False).mean()
std = plot_data.groupby('citation_type',sort=False).std()
bx = sns.violinplot(data=plot_data,y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width')
for i,v in enumerate(bx.collections[::2]):
v.set_color(pal[i])
bx2 = sns.violinplot(data=data[data.data_type=='random'],y='citation_rate',x='citation_type',palette=pal,order=names,saturation=1,cut=0,scale='width',inner=None)
for i,v in enumerate(bx2.collections[8:]):
v.set_color([pal[i][0],pal[i][1],pal[i][2],.35])
# plt.ylabel("percent over-/under-citation",labelpad=0)
plt.xlabel('')
plt.ylabel('')
plt.title('c, citers of color',{'fontweight':'bold'},'left',pad=1)
ax3.yaxis.set_major_locator(plt.MaxNLocator(8))
ax3.tick_params(axis='y', which='major', pad=-5)
ax3.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax3.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
for i in range(4):
m,s = mean.values[i],std.values[i]
loc = m + (s*3)
low = np.around(m - (s*2),1)[0]
high = np.around(m + (s*2),1)[0]
m = np.around(m,1)[0]
if m > 0: loc = loc * -1
if loc > plt.ylim()[1]:
loc = plt.ylim()[1]
if loc < plt.ylim()[0]:
loc = plt.ylim()[0]
ax3.text(i,loc,'%s<%s>%s\n%s'%(low,m,high,log_p_value(p_vals[i])),horizontalalignment='center',fontsize=8)
ylim = np.array([ax3.get_ylim(),ax2.get_ylim()]).min(),np.array([ax3.get_ylim(),ax2.get_ylim()]).max()
plt.sca(ax3)
plt.ylim(ylim)
plt.sca(ax2)
plt.ylim(ylim)
if type(control) == bool or control == 'True_jn': plt.savefig('/%s/figures/percentages/method-%s_control-%s_poc-%s_wp-%s.pdf'%(homedir,method,control,within_poc,jneuro_papers))
else: plt.savefig('/%s/figures/percentages/method-%s_control-%s_poc-%s_wl-%s.pdf'%(homedir,method,control,within_poc,walk_length))
plt.close()
# return None
"""
temporal trends
"""
n_iters = t_n_iters
white_data = pd.DataFrame(columns=['citation_rate','citation_type','year','base_rate','emperical_rate','data_type','boot'])
for year in range(2009,2020):
papers = df[(df.year==year)&(df.fa_race=='white')&(df.la_race=='white')]
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = np.nanmean(plot_matrix[boot_papers],axis=0)
expected = np.nanmean(plot_base_matrix[boot_papers],axis=0)
emperical = emperical / np.sum(emperical)
expected = expected / np.sum(expected)
rate = (emperical - expected) / expected
random = np.nanmean(plot_null[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
e_random = np.nanmean(plot_null_base[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
random = random / np.sum(random)
e_random = e_random / np.sum(e_random)
r_rate = (random - e_random) / e_random
boot_df = pd.DataFrame(data= np.array([rate.flatten(),names]).swapaxes(0,1),columns=['citation_rate','citation_type'])
boot_df['year'] = year
boot_df['base_rate'] = expected.flatten()
boot_df['emperical_rate'] = emperical.flatten()
boot_df['data_type'] = 'real'
boot_df['boot'] = boot
white_data = white_data.append(boot_df,ignore_index=True)
boot_df = pd.DataFrame(data= np.array([r_rate.flatten(),names]).swapaxes(0,1),columns=['citation_rate','citation_type'])
boot_df['year'] = year
boot_df['base_rate'] = e_random.flatten()
boot_df['emperical_rate'] = random.flatten()
boot_df['data_type'] = 'random'
boot_df['boot'] = boot
white_data = white_data.append(boot_df,ignore_index=True)
white_data = white_data.dropna()
white_data.citation_rate = (white_data.citation_rate.astype(float)*100)
white_data.base_rate = (white_data.base_rate .astype(float)*100)
white_data.emperical_rate = (white_data.emperical_rate.astype(float)*100)
slope_boot_df = pd.DataFrame(columns=['slope','data','citation_type'])
for boot in range(n_iters):
for name in names:
real_slope = scipy.stats.linregress(white_data[(white_data.data_type=='real')&(white_data.citation_type==name)&(white_data.boot==boot)].citation_rate.values,range(11))[0]
random_slope = scipy.stats.linregress(white_data[(white_data.data_type=='random')&(white_data.citation_type==name)&(white_data.boot==boot)].citation_rate.values,range(11))[0]
slope_boot_df = slope_boot_df.append(pd.DataFrame(data= np.array([[real_slope,random_slope],['real','random'],[name,name]]).swapaxes(0,1),columns=['slope','data','citation_type']))
slope_boot_df.slope=slope_boot_df.slope.astype(float)
non_white_data = pd.DataFrame(columns=['citation_rate','citation_type','year','base_rate','emperical_rate','data_type','boot'])
for year in range(2009,2020):
papers = df[(df.year==year)&((df.fa_race!='white')|(df.la_race!='white'))]
for boot in range(n_iters):
boot_papers = papers.sample(len(papers),replace=True).index
emperical = np.nanmean(plot_matrix[boot_papers],axis=0)
expected = np.nanmean(plot_base_matrix[boot_papers],axis=0)
emperical = emperical / np.sum(emperical)
expected = expected / np.sum(expected)
rate = (emperical - expected) / expected
random = np.nanmean(plot_null[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
e_random = np.nanmean(plot_null_base[np.random.choice(plot_null.shape[0]),boot_papers],axis=0)
random = random / np.sum(random)
e_random = e_random / np.sum(e_random)
r_rate = (random - e_random) / e_random
boot_df = pd.DataFrame(data= np.array([rate.flatten(),names]).swapaxes(0,1),columns=['citation_rate','citation_type'])
boot_df['year'] = year
boot_df['base_rate'] = expected.flatten()
boot_df['emperical_rate'] = emperical.flatten()
boot_df['data_type'] = 'real'
boot_df['boot'] = boot
non_white_data = non_white_data.append(boot_df,ignore_index=True)
boot_df = pd.DataFrame(data= np.array([r_rate.flatten(),names]).swapaxes(0,1),columns=['citation_rate','citation_type'])
boot_df['year'] = year
boot_df['base_rate'] = e_random.flatten()
boot_df['emperical_rate'] = random.flatten()
boot_df['data_type'] = 'random'
boot_df['boot'] = boot
non_white_data = non_white_data.append(boot_df,ignore_index=True)
non_white_data = non_white_data.dropna()
non_white_data.citation_rate = (non_white_data.citation_rate.astype(float)*100)
non_white_data.base_rate = (non_white_data.base_rate .astype(float)*100)
non_white_data.emperical_rate = (non_white_data.emperical_rate.astype(float)*100)
non_white_slope_boot_df = pd.DataFrame(columns=['slope','data','citation_type'])
for boot in range(n_iters):
for name in names:
real_slope = scipy.stats.linregress(non_white_data[(non_white_data.data_type=='real')&(non_white_data.citation_type==name)&(non_white_data.boot==boot)].citation_rate.values,range(11))[0]
random_slope = scipy.stats.linregress(non_white_data[(non_white_data.data_type=='random')&(non_white_data.citation_type==name)&(non_white_data.boot==boot)].citation_rate.values,range(11))[0]
non_white_slope_boot_df = non_white_slope_boot_df.append(pd.DataFrame(data= np.array([[real_slope,random_slope],['real','random'],[name,name]]).swapaxes(0,1),columns=['slope','data','citation_type']))
non_white_slope_boot_df.slope=non_white_slope_boot_df.slope.astype(float)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,6),constrained_layout=True)
gs = fig.add_gridspec(4, 4)
ax1 = fig.add_subplot(gs[:2,:2])
ax2 = fig.add_subplot(gs[:2,2:])
ax3 = fig.add_subplot(gs[2,0])
ax4 = fig.add_subplot(gs[2,1])
ax5 = fig.add_subplot(gs[3,0])
ax6 = fig.add_subplot(gs[3,1])
ax7 = fig.add_subplot(gs[2,2])
ax8 = fig.add_subplot(gs[2,3])
ax9 = fig.add_subplot(gs[3,2])
ax10 = fig.add_subplot(gs[3,3])
plt.sca(ax1)
sns.lineplot(x="year", y="citation_rate",hue="citation_type",data=white_data[white_data.data_type=='real'],ax=ax1,hue_order=names,ci='sd',palette=pal)
plt.legend(labels=names,ncol=2,fontsize='small',frameon=False,columnspacing=0.5,handletextpad=0)#bbox_to_anchor=(0., 1.05))
ax1.set_xlabel('')
plt.title('a, white citers',{'fontweight':'bold'},'left',pad=1)
ax1.set_ylabel('percent over-/under-citation',labelpad=0)
ax1.tick_params(axis='x', which='major', pad=-5)
ax1.yaxis.set_major_locator(plt.MaxNLocator(8))
ax1.tick_params(axis='y', which='major', pad=-5)
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax1.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax1.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
plt.axhline(0, color="grey", clip_on=False,linestyle='--')
plt.xlim(2009,2019)
for color,name in zip(pal,names):
y_val=white_data[(white_data.data_type=='real')&(white_data.citation_type==name)&((white_data.year==2017)|(white_data.year==2018)|(white_data.year==2019))].citation_rate.max()
x = slope_boot_df[(slope_boot_df.data=='real')&(slope_boot_df.citation_type==name)].slope.values
y = slope_boot_df[(slope_boot_df.data=='random')&(slope_boot_df.citation_type==name)].slope.values
p_val = min(len(y[y>x.mean()]),len(y[y<x.mean()]))
p_val = p_val/n_iters
print (p_val)
p_val = log_p_value(p_val)
plt.text(2019,y_val,'slope=%s,%s'%(np.around(x.mean(),2),p_val),horizontalalignment='right',verticalalignment='bottom',fontsize=8,color=color)
plt.sca(ax2)
sns.lineplot(x="year", y="citation_rate",hue="citation_type",data=non_white_data[non_white_data.data_type=='real'],ax=ax2,hue_order=names,ci='sd',palette=pal)
plt.legend(labels=names,ncol=2,fontsize='small',frameon=False,columnspacing=0.5,handletextpad=0)#,bbox_to_anchor=(0., 1.05))
ax2.set_xlabel('')
# plt.axhline(0, color="grey", clip_on=False,axes=ax2,linestyle='--')
plt.title('b, citers of color',{'fontweight':'bold'},'left',pad=1)
sns.despine()
ax2.set_ylabel('percent over-/under-citation',labelpad=0)
ax2.tick_params(axis='x', which='major', pad=-5)
ax2.yaxis.set_major_locator(plt.MaxNLocator(8))
ax2.tick_params(axis='y', which='major', pad=-5)
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax2.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax2.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
fig.text(0.00, 0.26, 'percentage of citations', va='center', rotation='vertical')
plt.axhline(0, color="grey", clip_on=False,linestyle='--')
plt.xlim(2009,2019)
for color,name in zip(pal,names):
y_val=non_white_data[(non_white_data.data_type=='real')&(non_white_data.citation_type==name)&((non_white_data.year==2017)|(non_white_data.year==2018)|(non_white_data.year==2019))].citation_rate.max()
x = non_white_slope_boot_df[(non_white_slope_boot_df.data=='real')&(non_white_slope_boot_df.citation_type==name)].slope.values
y = non_white_slope_boot_df[(non_white_slope_boot_df.data=='random')&(non_white_slope_boot_df.citation_type==name)].slope.values
p_val = min(len(y[y>x.mean()]),len(y[y<x.mean()]))
p_val = p_val/n_iters
print (p_val)
p_val = log_p_value(p_val)
plt.text(2019,y_val,'slope=%s,%s'%(np.around(x.mean(),2),p_val),horizontalalignment='right',verticalalignment='bottom',fontsize=8,color=color)
ylim = np.array([np.array([ax1.get_ylim(),ax2.get_ylim()]).min(),np.array([ax1.get_ylim(),ax2.get_ylim()]).max()])
plt.sca(ax1)
plt.ylim(ylim*1.1)
plt.sca(ax2)
plt.ylim(ylim*1.1)
white_data = white_data[white_data.data_type=='real']
non_white_data = non_white_data[non_white_data.data_type=='real']
label = True
white_max = np.max([white_data.groupby('citation_type').max()['emperical_rate'],white_data.groupby('citation_type').max()['base_rate']],axis=0)
white_min = np.min([white_data.groupby('citation_type').min()['emperical_rate'],white_data.groupby('citation_type').min()['base_rate']],axis=0)
aoc_max = np.max([non_white_data.groupby('citation_type').max()['emperical_rate'],non_white_data.groupby('citation_type').max()['base_rate']],axis=0)
aoc_min = np.min([non_white_data.groupby('citation_type').min()['emperical_rate'],non_white_data.groupby('citation_type').min()['base_rate']],axis=0)
min_y = np.flip(np.min([white_min,aoc_min],axis=0))
max_y = np.flip(np.max([white_max,aoc_max],axis=0))
i = 0
for ax,citation_type,color in zip([ax3,ax4,ax5,ax6],white_data.citation_type.unique(),pal):
plt.sca(ax)
ax.clear()
if label == True:
plt.title('c, white citers',{'fontweight':'bold'},'left',pad=1)
label = False
tmp_ax0 = sns.lineplot(x="year", y="emperical_rate",data=white_data[white_data.citation_type==citation_type],ci='sd',color=color,marker='o')
tmp_ax1 = sns.lineplot(x="year", y="base_rate",data=white_data[white_data.citation_type==citation_type],ci='sd',color='grey',marker='o')
if citation_type == 'white-white' or citation_type== 'poc-poc' :
s,i_,r,p,std = scipy.stats.linregress(white_data[white_data.citation_type==citation_type].groupby('year').mean()['emperical_rate'],range(11))
print (s,p)
s,i_,r,p,std = scipy.stats.linregress(white_data[white_data.citation_type==citation_type].groupby('year').mean()['base_rate'],range(11))
print (s,p)
ax.set_xlabel('')
# ax3.set_ylabel('percentage of citations',labelpad=0)
sns.despine()
ax.yaxis.set_major_locator(plt.MaxNLocator(6))
ax.tick_params(axis='y', which='major', pad=-5)
ax.tick_params(axis='x', which='major', bottom=False,top=False,labelbottom=False)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.set_ylabel('')
ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=1))
plt.ylim(min_y[i],max_y[i])
i = i + 1
label = True
i = 0
for ax,citation_type,color in zip([ax7,ax8,ax9,ax10],non_white_data.citation_type.unique(),pal):
plt.sca(ax)
if label == True:
plt.title('d, citers of color',{'fontweight':'bold'},'left',pad=1)
label = False
tmp_ax0 = sns.lineplot(x="year", y="emperical_rate",data=non_white_data[non_white_data.citation_type==citation_type],ci='sd',color=color,marker='o')
tmp_ax1 = sns.lineplot(x="year", y="base_rate",data=non_white_data[non_white_data.citation_type==citation_type],ci='sd',color='grey',marker='o')
ax.set_xlabel('')
# ax3.set_ylabel('percentage of citations',labelpad=0)
sns.despine()
ax.yaxis.set_major_locator(plt.MaxNLocator(6))
ax.tick_params(axis='y', which='major', pad=-5)
ax.tick_params(axis='x', which='major', bottom=False,top=False,labelbottom=False)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.set_ylabel('')
ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=1))
plt.ylim(min_y[i],max_y[i])
i = i + 1
if type(control) == bool: plt.savefig('/%s/figures/temporal/method-%s_control-%s_poc-%s_wp-%s.pdf'%(homedir,method,control,within_poc,walk_papers))
else: plt.savefig('/%s/figures/temporal/method-%s_control-%s_poc-%s_wl-%s.pdf'%(homedir,method,control,within_poc,walk_length))
plt.close()
def compare_nulls():
paper_data = pd.read_csv('/%s/data/citaion_rates_%s_%s.csv'%(homedir,method,'True'))
paper_data = paper_data[paper_data.data_type=='real']
walk_cite = pd.read_csv('/%s/data/citaion_rates_%s_%s_%s.csv'%(homedir,method,'walk','cited'))
walk_cite = walk_cite[walk_cite.data_type=='real']
walk_all = pd.read_csv('/%s/data/citaion_rates_%s_%s_%s.csv'%(homedir,method,'walk','all'))
walk_all = walk_all[walk_all.data_type=='real']
raw_data = | pd.read_csv('/%s/data/citaion_rates_%s_%s.csv'%(homedir,method,'False')) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
conditional_df,
conditional_right,
conditional_series,
)
@pytest.mark.xfail(reason="empty object will pass thru")
@given(s=conditional_series())
def test_df_empty(s):
"""Raise ValueError if `df` is empty."""
df = pd.DataFrame([], dtype="int", columns=["A"])
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@pytest.mark.xfail(reason="empty object will pass thru")
@given(df=conditional_df())
def test_right_empty(df):
"""Raise ValueError if `right` is empty."""
s = pd.Series([], dtype="int", name="A")
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_right_df(df):
"""Raise TypeError if `right` is not a Series/DataFrame."""
with pytest.raises(TypeError):
df.conditional_join({"non": [2, 3, 4]}, ("A", "non", "=="))
@given(df=conditional_df(), s=conditional_series())
def test_right_series(df, s):
"""Raise ValueError if `right` is not a named Series."""
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_df_MultiIndex(df):
"""Raise ValueError if `df` columns is a MultiIndex."""
with pytest.raises(ValueError):
df.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(
pd.Series([2, 3, 4], name="A"), (("A", "F"), "non", "==")
)
@given(df=conditional_df())
def test_right_MultiIndex(df):
"""Raise ValueError if `right` columns is a MultiIndex."""
with pytest.raises(ValueError):
right = df.copy()
right.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(right, (("A", "F"), "non", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_conditions_exist(df, s):
"""Raise ValueError if no condition is provided."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s)
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_type(df, s):
"""Raise TypeError if any condition in conditions is not a tuple."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("A", "B", ""), ["A", "B"])
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_length(df, s):
"""Raise ValueError if any condition is not length 3."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("A", "B", "C", "<"))
df.conditional_join(s, ("A", "B", ""), ("A", "B"))
@given(df=conditional_df(), s=conditional_series())
def test_check_left_on_type(df, s):
"""Raise TypeError if left_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, (1, "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_right_on_type(df, s):
"""Raise TypeError if right_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", 1, "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_type(df, s):
"""Raise TypeError if the operator is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", 1))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_df(df, s):
"""
Raise ValueError if `left_on`
can not be found in `df`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("C", "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_right(df, s):
"""
Raise ValueError if `right_on`
can not be found in `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "A", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_correct(df, s):
"""
Raise ValueError if `op` is not any of
`!=`, `<`, `>`, `>=`, `<=`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "=!"))
@given(df=conditional_df(), s=conditional_series())
def test_check_how_type(df, s):
"""
Raise TypeError if `how` is not a string.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how=1)
@given(df=conditional_df(), s=conditional_series())
def test_check_how_value(df, s):
"""
Raise ValueError if `how` is not one of
`inner`, `left`, or `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how="INNER")
@given(df=conditional_df(), right=conditional_right())
def test_dtype_strings_non_equi(df, right):
"""
Raise ValueError if the dtypes are both strings
on a non-equi operator.
"""
with pytest.raises(ValueError):
df.conditional_join(right, ("C", "Strings", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_not_permitted(df, s):
"""
Raise ValueError if dtype of column in `df`
is not an acceptable type.
"""
df["F"] = pd.Timedelta("1 days")
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("F", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_str(df, s):
"""
Raise ValueError if dtype of column in `df`
does not match the dtype of column from `right`.
"""
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_category_non_equi(df, s):
"""
Raise ValueError if dtype is category,
and op is non-equi.
"""
with pytest.raises(ValueError):
s.name = "A"
s = s.astype("category")
df["C"] = df["C"].astype("category")
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_sort_by_appearance_type(df, s):
"""
Raise TypeError if `sort_by_appearance` is not a boolean.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), sort_by_appearance="True")
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_floats(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, C="2"), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints_extension_array(df, right):
"""Test output for a single condition. "<"."""
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_equal(df, right):
"""Test output for a single condition. "<=". DateTimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_date(df, right):
"""Test output for a single condition. "<". Dates"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_datetime(df, right):
"""Test output for a single condition. ">". Datetimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} >= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_floats_floats(df, right):
"""Test output for a single condition. ">"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints_extension_array(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_numeric(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_ints_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_floats_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["B", "Numeric"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_datetime(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["E", "Dates"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_string(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@pytest.mark.xfail(
reason="""sometimes, categories are coerced to objects;
might be a pandas version issue.
"""
)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_category(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
df = df.assign(C=df["C"].astype("category"))
right = right.assign(Strings=right["Strings"].astype("category"))
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_numeric(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
df.loc[0, "A"] = pd.NA
right.loc[0, "Integers"] = pd.NA
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_datetime(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["E", "Dates"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_left(df, right):
"""Test output when `how==left`. "<="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1, index=np.arange(len(df)))
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = df.join(
expected.filter(right.columns), how="left", sort=False
).reset_index(drop=True)
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="left", sort_by_appearance=True
)
| assert_frame_equal(expected, actual) | pandas.testing.assert_frame_equal |
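

# Illustrative sketch (not part of the original test suite): a minimal direct use of the
# `conditional_join` API exercised by the tests above. The frames and values here are
# hypothetical.
def _example_conditional_join_usage():
    """Inner non-equi join keeping row pairs where left.A < right.Integers."""
    import janitor  # noqa: F401  # importing janitor registers the DataFrame method
    left = pd.DataFrame({"A": [1, 5, 10]})
    right = pd.DataFrame({"Integers": [3, 7]})
    return left.conditional_join(right, ("A", "Integers", "<"), how="inner")
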
import xgboost as xgb
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.DataFrame()
for i in range(0,29):
rpath = "train_data\\train_data_p"+str(i)+".csv"
print("[TRAIN] Reading:",rpath)
d0 = pd.read_csv(rpath)
data = pd.concat([data,d0])
test = pd.read_csv('train_data\\train_data_p29.csv')
buy = data.groupby('buy')
ybuy = buy.get_group(1)
nbuy = buy.get_group(0).head(115000)
train_new = | pd.concat([ybuy,nbuy]) | pandas.concat |
# -*- encoding: utf-8 -*-
"""
==============
Classification
==============
The following example shows how to fit a simple classification model with
*auto-sklearn*.
"""
import os
import openml
import pandas as pd
import sklearn.metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.multiclass import type_of_target
import autosklearn.classification
############################################################################
# Data Loading
# ============
def load_dataset(dataset_id):
dataset = openml.datasets.get_dataset(dataset_id)
tasks_a = openml.tasks.list_tasks(task_type_id=1, status='active')
tasks_a = | pd.DataFrame.from_dict(tasks_a, orient="index") | pandas.DataFrame.from_dict |
import pytest
from datetime import datetime, timedelta
import pytz
import numpy as np
from pandas import (NaT, Index, Timestamp, Timedelta, Period,
DatetimeIndex, PeriodIndex,
TimedeltaIndex, Series, isna)
from pandas.util import testing as tm
from pandas._libs.tslib import iNaT
@pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex),
(Timedelta('NaT'), TimedeltaIndex),
(Period('NaT', freq='M'), PeriodIndex)])
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
s = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_identity(klass):
assert klass(None) is NaT
result = klass(np.nan)
assert result is NaT
result = klass(None)
assert result is NaT
result = klass(iNaT)
assert result is NaT
result = klass(np.nan)
assert result is NaT
result = klass(float('nan'))
assert result is NaT
result = klass(NaT)
assert result is NaT
result = klass('NaT')
assert result is NaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_equality(klass):
# nat
if klass is not Period:
klass('').value == iNaT
klass('nat').value == iNaT
klass('NAT').value == iNaT
klass(None).value == iNaT
klass(np.nan).value == iNaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_round_nat(klass):
# GH14940
ts = klass('nat')
for method in ["round", "floor", "ceil"]:
round_method = getattr(ts, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert round_method(freq) is ts
def test_NaT_methods():
# GH 9513
raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
'fromordinal', 'fromtimestamp', 'isocalendar',
'strftime', 'strptime', 'time', 'timestamp',
'timetuple', 'timetz', 'toordinal', 'tzname',
'utcfromtimestamp', 'utcnow', 'utcoffset',
'utctimetuple']
nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today',
'tz_convert', 'tz_localize']
nan_methods = ['weekday', 'isoweekday']
for method in raise_methods:
if hasattr(NaT, method):
with pytest.raises(ValueError):
getattr(NaT, method)()
for method in nan_methods:
if hasattr(NaT, method):
assert np.isnan(getattr(NaT, method)())
for method in nat_methods:
if hasattr(NaT, method):
# see gh-8254
exp_warning = None
if method == 'to_datetime':
exp_warning = FutureWarning
with tm.assert_produces_warning(
exp_warning, check_stacklevel=False):
assert getattr(NaT, method)() is NaT
# GH 12300
assert NaT.isoformat() == 'NaT'
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_isoformat(klass):
result = klass('NaT').isoformat()
expected = 'NaT'
assert result == expected
def test_nat_arithmetic():
# GH 6873
i = 2
f = 1.5
for (left, right) in [(NaT, i), (NaT, f), (NaT, np.nan)]:
assert left / right is NaT
assert left * right is NaT
assert right * left is NaT
with pytest.raises(TypeError):
right / left
# Timestamp / datetime
t = Timestamp('2014-01-01')
dt = datetime(2014, 1, 1)
for (left, right) in [(NaT, NaT), (NaT, t), (NaT, dt)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
# timedelta-like
# offsets are tested in test_offsets.py
delta = timedelta(3600)
td = Timedelta('5s')
for (left, right) in [(NaT, delta), (NaT, td)]:
# NaT + timedelta-like returns NaT
assert right + left is NaT
assert left + right is NaT
assert right - left is NaT
assert left - right is NaT
# GH 11718
t_utc = Timestamp('2014-01-01', tz='UTC')
t_tz = Timestamp('2014-01-01', tz='US/Eastern')
dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
for (left, right) in [(NaT, t_utc), (NaT, t_tz),
(NaT, dt_tz)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
# int addition / subtraction
for (left, right) in [(NaT, 2), (NaT, 0), (NaT, -3)]:
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
def test_nat_arithmetic_index():
# GH 11718
dti = DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
exp = DatetimeIndex([NaT, NaT], name='x')
tm.assert_index_equal(dti + NaT, exp)
| tm.assert_index_equal(NaT + dti, exp) | pandas.util.testing.assert_index_equal |
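

# Illustrative sketch (not part of the original test module): NaT propagates through
# vectorized datetime arithmetic inside a Series just as it does for the scalar cases
# asserted above.
def _example_nat_propagation():
    ser = Series([Timestamp('2011-01-01'), NaT])
    # The valid timestamp shifts by one day; the NaT entry stays NaT.
    return ser + Timedelta('1 days')
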
import configparser
import os
import sys
import marg_mcmc as wl
sys.path.insert(0, '../bin_analysis')
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import fits
import time
#import whitelight2018 as wl
import batman
import get_limb as gl
#from wave_solution import orbits
def event_time(date, properties):
"""Program to determine the expected event time
Inputs
date: 1D array of the date of each exposure (MJD)
properties: 1D array containing the last observed eclipse
and the period. (MJD, days)"""
time=properties[1]
period=properties[4]
while time < date[0]:
time+=period
return float(time)
def get_orbits(date):
"""Procedure to organize light curve data by HST orbit"""
orbit=np.zeros(1).astype(int)
for i in range(len(date)-1):
t=date[i+1]-date[i]
if t*86400 > 1200.:
            orbit=np.append(orbit, i+1) # a gap longer than 1200 s marks the start of a new HST orbit
return np.append(orbit, len(date))
def inputs(data, transit=True):
""" Function to read in priors for a system.
INPUTS:
data: data table of priors for a particular planet
OUTPUTS:
Returns array of system properties: [rprs, central event time, inc
,a/r, period, depth]
"""
inp_values=pd.read_table(data,sep=' ', index_col=None)
data_arr=inp_values.iloc[:,2].values
labels=inp_values.iloc[:,0].values
param_errs=inp_values.iloc[:,3].values
# Rj-m, Rsolar-m,AU-m, JD -> MJD
print("Fix this to read in a/rs and rp/rs automatically")
conversions=np.array([6.9911e7, 6.957e8, 1.49598e11, -2400000.5])
inc=data_arr[5]
period=data_arr[4]
a_R=data_arr[7]*conversions[2]/(data_arr[1]*conversions[1])
a_R_err=np.sqrt((param_errs[7]*conversions[2]/data_arr[1]/conversions[1])**2
+ (a_R*param_errs[1]/conversions[1])**2)
rprs = data_arr[0]*conversions[0]/(data_arr[1]*conversions[1])
if transit==True:
epoch=data_arr[6]+conversions[3]
depth=rprs*rprs
else:
epoch=data_arr[6]+conversions[3]+period/2.
depth = rprs*rprs*(data_arr[2]/data_arr[3])/3
props=np.zeros(6)
props[0]=rprs
props[1]=epoch
props[2]=inc
props[3]=a_R
props[4]=period
props[5]=depth
errors=np.zeros(6)
errors[0]=0
errors[1]=param_errs[6]
errors[2]=param_errs[5]
errors[3]=a_R_err
errors[4]=param_errs[4]
errors[5]=0
return [props,errors]
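
# Note on the priors file read above (layout inferred from the indexing, so treat it as an
# assumption): a space-separated table whose column 0 holds parameter labels, column 2 the
# values and column 3 the errors. Rows 0-7 are, in order: planet radius [R_Jup], stellar
# radius [R_Sun], two quantities whose ratio sets the eclipse depth, period [days],
# inclination [deg], epoch [JD], and semi-major axis [AU].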
# def correction(inputs, date, flux, transit=False):
# params=batman.TransitParams()
# params.w=90.
# params.ecc=0
# params.rp=np.sqrt(inputs[0])
# t0=inputs[1]
# params.inc=inputs[2]
# params.a=inputs[3]
# params.per=inputs[4]
# depth=inputs[5]
# phase = (date-t0)/params.per
# phase = phase - np.floor(phase)
# phase[phase > 0.5] = phase[phase > 0.5] - 1.0
# if transit==True:
# params.t0=t0
# params.u=inputs[6:]
# params.limb_dark="quadratic"
# m=batman.TransitModel(params, date)
# model=m.light_curve(params)
# else:
# params.fp=inputs[5]
# params.t_secondary=t0
# params.u=[]
# params.limb_dark="uniform"
# m=batman.TransitModel(params, date, transittype="secondary")
# model=m.light_curve(params)
# corrected=flux/model
# return corrected
# def remove_bad_data(light_curve, spectra, light_corrected, date1, light_err, spec_err
# , user_inputs, check=False):
# """Procedure to remove "bad" data from light curve"""
# med= np.ma.median(light_corrected)
# sigma = np.sqrt(np.sum((light_corrected-med)**2)/(2*len(light_corrected)))
# medi=np.zeros_like(date1)+med
# sig3=medi+3*sigma
# sig4=medi+4*sigma
# sig5=medi+5*sigma
# sig3m=medi-3*sigma
# sig4m=medi-4*sigma
# sig5m=medi-5*sigma
# if check==False:
# nPasses=int(user_inputs[3])
# sigma_cut_factor=user_inputs[2]
# else:
# data=plt.plot(date1, light_corrected,'bo',ls='dotted')
# plt.xlabel('MJD')
# plt.ylabel('Total Flux')
# s5=plt.plot(date1, sig5,'pink',date1, sig5m, 'pink')
# s5[0].set_label('5-sigma')
# s4=plt.plot(date1, sig4,'g', date1, sig4m, 'g')
# s4[0].set_label('4-sigma')
# s3=plt.plot(date1, sig3,'r', date1, sig3m, 'r')
# s3[0].set_label('3-sigma')
# plt.plot(date1, medi, label='Median',ls='solid')
# plt.legend(scatterpoints=1)
# plt.show(block=False)
# cut = raw_input("Enter the sigma-cut factor (3-5 recommended): ")
# sigma_cut_factor = float(cut)
# user_inputs[2]=sigma_cut_factor
# passes=raw_input("Enter the number of passes for the sigma-cut: ")
# nPasses=int(passes)
# user_inputs[3]=nPasses
# plt.close()
# # Cut out the "bad" data
# for j in range(nPasses):
# med= np.ma.median(light_corrected)
# sigma = np.sqrt(np.sum((light_corrected-med)**2)/(2*len(light_corrected)))
# dif= np.abs(light_corrected-med)
# index=np.where(dif < sigma_cut_factor*sigma)[0]
# light_curve=light_curve[index]
# date1=date1[index]
# light_corrected=light_corrected[index]
# light_err=light_err[index]
# spectra=spectra[index,:]
# spec_err=spec_err[index,:]
# return [light_curve, spectra, light_corrected, date1, light_err, spec_err]
def preprocess_whitelight(visit
, direction
, x=0, y=0
, check=True
, inp_file=False
, save_processed_data=False
, transit=False
, data_plots=True
, mcmc=False
, openinc=False
, openar=True
, fixtime=False
, norandomt=True
, fit_plots=True
, save_mcmc=False
, save_model_info=False):
"""
Function to allow user to extract relevant orbital data from reduced time
series of a visit. Also allow user to exclude first orbit or first exposure
of each orbit. The selected data is then fed into "marg_mcmc" for model light
curve fitting.
INPUTS
See config.py file
[DATA]
x, y: Allow the user to reduce aperture by (x,y) pixels
checks: set to "on" to manually reduce data
inp_file: Allow user to load in preprocess information instead of manually
finding. Cannot have checks and inp_file both off or both on.
If checks is set to on, "user_inputs" will return the inputs
that the user used: [first orbit, last orbit, sigma cut factor,
number of passes, center eclipse time]. If checks is set to off, then
the user_inputs array will be used as inputs (easier to automate)
[MODEL]
mcmc: Use MCMC sampler, extracting corner plot and other diagnostics
openinc: Fit for inclination (default is fixed)
openar: Fit for a/Rstar (default is fixed)
fixtime: Fix center of event time (default is open)
    norandomt: Do not allow the center of event time starting point to vary randomly
fit_plots: Show model light curve fit in real time
[SAVE]
save_processed_data: Save the data with the systematics removed for the best fit model
save_model_info: Save best fit parameters for every systematic model
save_mcmc: Save MCMC products, such as corner plot, autocorrelation, etc.
The save files will all be saved with key or name "planet/visitXX/direction"
"""
if direction != 'both':
folder = '../data_reduction/reduced/%s/%s/final/*.fits' % (visit, direction)
data=np.sort(np.asarray(glob.glob(folder)))
nexposure = len(data)
print('There are %d exposures in this visit' % nexposure)
alldate=np.zeros(len(data))
time=np.zeros_like(alldate)
test=fits.open(data[0])
xlen, ylen = test[0].data.shape
test.close()
xlen-=2*x
ylen-=2*y
allspec=np.ma.zeros((len(data),xlen, ylen))
allerr=np.zeros((len(data),xlen,ylen))
xmin=x
xmax=xlen-x
ymin=y
ymax=ylen-y
for i, img in enumerate(data):
expfile=fits.open(img)
hdr=expfile[0].header
exp=expfile[0].data
mask=expfile[1].data
errs=expfile[2].data
expfile.close()
alldate[i]=(hdr['EXPSTART']+hdr['EXPEND'])/2.
time[i]=hdr['EXPTIME']
expo=exp[xmin:xmax, ymin:ymax]
mask=mask[xmin:xmax, ymin:ymax]
errs=errs[xmin:xmax, ymin:ymax]
allspec[i,:,:]=np.ma.array(expo, mask=mask)
allerr[i,:,:]=np.ma.array(errs, mask=mask)
allspec1d=np.ma.sum(allspec,axis=1)
allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1))
median_flux = np.median(np.ma.sum(allspec1d, axis=1))
# Regardless of direction, if all exposures share the same one we make
# dir_array all zeros for easy parameter use in model fitting.
dir_array = np.zeros_like(alldate)
else:
direction = 'forward'
folder = '../data_reduction/reduced/%s/%s/final/*.fits' % (visit, direction)
data=np.sort(np.asarray(glob.glob(folder)))
nexposure = len(data)
print('There are %d exposures in this visit' % nexposure)
alldate=np.zeros(len(data))
time=np.zeros_like(alldate)
test=fits.open(data[0])
xlen, ylen = test[0].data.shape
test.close()
xlen-=2*x
ylen-=2*y
allspec=np.ma.zeros((len(data),xlen, ylen))
allerr=np.zeros((len(data),xlen,ylen))
xmin=x
xmax=xlen-x
ymin=y
ymax=ylen-y
for i, img in enumerate(data):
expfile=fits.open(img)
hdr=expfile[0].header
exp=expfile[0].data
mask=expfile[1].data
errs=expfile[2].data
expfile.close()
alldate[i]=(hdr['EXPSTART']+hdr['EXPEND'])/2.
time[i]=hdr['EXPTIME']
expo=exp[xmin:xmax, ymin:ymax]
mask=mask[xmin:xmax, ymin:ymax]
errs=errs[xmin:xmax, ymin:ymax]
allspec[i,:,:]=np.ma.array(expo, mask=mask)
allerr[i,:,:]=np.ma.array(errs, mask=mask)
allspec1d=np.ma.sum(allspec,axis=1)
allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1))
median_flux = np.median(np.ma.sum(allspec1d, axis=1))
# Now do for other direction
direction = 'reverse'
folder = '../data_reduction/reduced/%s/%s/final/*.fits' % (visit, direction)
rdata=np.sort(np.asarray(glob.glob(folder)))
nexposure = len(rdata)
print('There are %d exposures in this visit' % nexposure)
rdate=np.zeros(len(rdata))
rtime=np.zeros_like(rdate)
rtest=fits.open(rdata[0])
rxlen,rylen = rtest[0].data.shape
test.close()
xlen-=2*x
ylen-=2*y
rallspec=np.ma.zeros((len(rdata),rxlen, rylen))
rallerr=np.zeros((len(rdata),rxlen,rylen))
rxmin=x
rxmax=rxlen-x
rymin=y
rymax=rylen-y
for i, img in enumerate(rdata):
expfile=fits.open(img)
hdr=expfile[0].header
exp=expfile[0].data
mask=expfile[1].data
errs=expfile[2].data
expfile.close()
rdate[i]=(hdr['EXPSTART']+hdr['EXPEND'])/2.
rtime[i]=hdr['EXPTIME']
expo=exp[rxmin:rxmax, rymin:rymax]
mask=mask[rxmin:rxmax, rymin:rymax]
errs=errs[rxmin:rxmax, rymin:rymax]
rallspec[i,:,:]=np.ma.array(expo, mask=mask)
rallerr[i,:,:]=np.ma.array(errs, mask=mask)
rallspec1d=np.ma.sum(rallspec,axis=1)
rallerr1d=np.sqrt(np.ma.sum(rallerr*rallerr, axis=1))
rmedian_flux = np.median(np.ma.sum(rallspec1d, axis=1))
dir_factor = median_flux / rmedian_flux
#dir_factor=1
rallspec1d = rallspec1d * dir_factor
rallerr1d = rallerr1d * dir_factor
# Define array that has 0s for forward scan and 1s for reverse
dir_array = np.append(np.zeros_like(alldate), np.ones_like(rdate))
alldate = np.ma.append(alldate,rdate)
allspec1d = np.ma.append(allspec1d, rallspec1d, axis=0)
allerr1d = np.ma.append(allerr1d, rallerr1d, axis=0)
direction = 'both'
# Put in correct time order
date_order=np.argsort(alldate)
dir_array = dir_array[date_order]
dir_save = dir_array
alldate=alldate[date_order]
allspec1d=allspec1d[date_order,:]
allerr1d=allerr1d[date_order,:]
#ix = np.arange(len(dir_array))
#ix = ix[17:]
#ix=np.delete(ix, [0,5, 19,38,57])
#dir_array = dir_array[ix]
#alldate=alldate[ix]
#allspec1d=allspec1d[ix, :]
#allerr1d=allerr1d[ix, :]
#0, 19, 38, 57
# Classify the data by each HST orbit. Returns array (orbit)
# which contains the indeces for the start of each orbit
orbit=get_orbits(alldate)
planet=visit[:-8]
props, errs=inputs('../planets/%s/inputs.dat' % planet, transit)
a1=gl.get_limb(planet,14000.,'a1')
a2=gl.get_limb(planet,14000.,'a2')
a3=gl.get_limb(planet,14000.,'a3')
a4=gl.get_limb(planet,14000.,'a4')
props=np.append(props, [a1,a2,a3,a4])
errs=np.append(errs, np.zeros(4))
props_hold=props.copy()
#orbit = np.zeros(1)
print("Number of total orbits: %d" % (len(orbit)-1))
# Choose which orbits to include in the eclipse fitting. 1-2 on either
# side of the eclipse is recommended
check2=check
if check == False:
if inp_file == True:
df=pd.read_csv('./preprocess_info.csv')
df=df[df.loc[:,'Transit']==transit]
user_inputs=df.loc[visit+direction,'User Inputs'].values
else:
sys.exit('Either allow checking or give csv file with pandas info.')
#allspec1d=np.ma.sum(allspec,axis=1).data
#allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1)).data
first_orbit=user_inputs[0]
last_orbit=user_inputs[1]
first_data = orbit[first_orbit]
last_data=orbit[last_orbit+1]
date=alldate[first_data:last_data]
dir_array=dir_array[first_data:last_data]
#allspec2d=allspec[first_data:last_data,:,:]
#allerr2d=allerr[first_data:last_data,:,:]
        # use the per-exposure 1D spectra (matches the check==True branch below)
        spec1d=allspec1d[first_data:last_data,:]
        err1d=allerr1d[first_data:last_data,:]
#allspec1d=np.ma.sum(allspec2d,axis=1) #spectra for each exposure: these axes may be backwards
#allerr1d=np.sqrt(np.ma.sum(allerr2d*allerr2d, axis=1))
light = np.ma.sum(spec1d, axis=1) # total light for each exposure
lighterr=np.sqrt(np.ma.sum(err1d*err1d, axis=1))
user_inputs[5], user_inputs[6] = first_data, last_data
        # sss  (leftover debugging halt; undefined name)
if check == True:
user_inputs=np.zeros(7)
while check2==True:
if data_plots==True:
print('woo')
#err=np.sqrt(np.sum(np.sum(allerr[:,:,:]*allerr[:,:,:], axis=1), axis=1))
#fl= np.sum(allspec[:,:,:], (1,2))
err=np.sqrt(np.sum(allerr1d*allerr1d, axis=1))
fl= np.sum(allspec1d, axis=1)
plt.errorbar(alldate,fl,err, fmt='o')
plt.xlabel('MJD')
plt.ylabel('Total Flux')
plt.show(block=False)
first = input("Enter the first orbit to include (starting from 0): ")
first_orbit=int(first)
user_inputs[0]=first_orbit
            last= input("Enter the last orbit to include (starting from 0): ")
last_orbit=int(last)
if data_plots==True: plt.close()
user_inputs[1]=last_orbit
#allspec1d=np.ma.sum(allspec,axis=1).data
#allerr1d=np.sqrt(np.ma.sum(allerr*allerr, axis=1)).data
first_data = orbit[first_orbit]
last_data=orbit[last_orbit+1]
date=alldate[first_data:last_data]
dir_array=dir_array[first_data:last_data]
#spec2d=allspec[first_data:last_data,:,:]
#err2d=allerr[first_data:last_data,:,:]
spec1d=allspec1d[first_data:last_data,:]
err1d=allerr1d[first_data:last_data,:]
#spec1d=np.ma.sum(spec2d,axis=1)
#err1d=np.sqrt(np.ma.sum(err2d*err2d, axis=1))
light = np.ma.sum(spec1d,axis=1)
lighterr=np.sqrt(np.ma.sum(err1d*err1d, axis=1))
user_inputs[5], user_inputs[6] = first_data, last_data
if data_plots==True:
plt.errorbar(date, light/max(light),lighterr/max(light),fmt='o')
plt.xlabel('MJD')
plt.ylabel('Total Flux')
plt.show(block=False)
ans = input("Is this correct? (Y/N): ")
if ans.lower() in ['y','yes']: check2=False
if data_plots==True: plt.close()
props[1]=event_time(date, props)
user_inputs[4]=props[1]
# We are only interested in scatter within orbits, so correct for flux
# between orbits by setting the median of each orbit to the median of
# the first orbit
# light_corrected=correction(props, date1, light, transit)
# Do a 4-pass sigma cut. 3-5 sigma is ideal. Change n to see how data
# is affected. A sigma of 3, 4, or 5 could be used, it depends on the
# data
# light2=light.copy()
# lighterr2=lighterr.copy()
# allspec2=allspec1.copy()
# allerr2=allerr1.copy()
# date2=date1.copy()
# light_corrected2=light_corrected.copy()
# ans2=''
# if check==False:
# light, allspec1, light_corrected, date1, lighterr, allerr1 = remove_bad_data(light
# , allspec1
# , light_corrected
# , date1
# , lighterr
# , allerr1
# , user_inputs)
# if check==True:
# while check==True:
# light=light2.copy()
# lighterr=lighterr2.copy()
# allspec1=allspec2.copy()
# allerr1=allerr2.copy()
# date1=date2.copy()
# light_corrected=light_corrected2.copy()
# # This performs the sigma cut and returns input for the fitter: a
# # double array which contains a spectra for each data point
# light, allspec1, light_corrected, date1, lighterr, allerr1 = remove_bad_data(light
# , allspec1
# , light_corrected
# , date1
# , lighterr
# , allerr1
# , user_inputs
# , check=check)
# if ploton==True:
# plt.errorbar(date2, light2,lighterr2, fmt='ro')
# plt.xlabel('MJD')
# plt.ylabel('Total Flux')
# plt.errorbar(date1, light,lighterr, fmt='o',ls='dotted')
# plt.show(block=False)
# ans2=raw_input('This is the new data, with the red points removed. Is this okay? (Y/N): ')
# if ploton==True: plt.close()
# if ans2.lower() in ['y','yes']: check=False
"""if transit == True:
fixtime = False
norandomt = True
#openar = True
openar = True
openinc = False
mcmc = False
else:
fixtime = True
norandomt = True
openar = False
openinc = False
mcmc = False
save_name = visit + '/' + direction
#savemc = visit
save_model_info = False
#save_model_info = visit
#visit=False
#savedata=False
save_mcmc=False
#savewl=False"""
# Set inclination (2), ars (3) to desired value if you want
#props[2]=89.17
#props[3]=5.55
# dir_array has only been included in marg_mcmc so far
#results=wl.whitelight2018(props, date, spec1d.data, err1d.data,
# plotting=True, norandomt=norandomt,
# openinc=openinc, openar=openar, fixtime=fixtime,
# transit=transit, savewl=visit)
print(props)
    # sss  (leftover debugging halt; undefined name)
results=wl.whitelight2020(props, date, spec1d.data, err1d.data, dir_array,
plotting=fit_plots, mcmc=mcmc, norandomt=norandomt,
openinc=openinc, openar=openar, fixtime=fixtime,
transit=transit, save_mcmc=save_mcmc, save_model_info=save_model_info,
save_name =save_name)
#direction = 'forward'
if save_processed_data == True:
sh=wl.get_shift(allspec1d)
cols=['Pixel %03d' % i for i in range(allspec1d.shape[1])]
subindex=['Value']*allspec1d.shape[0] + ['Error']*allspec1d.shape[0]
ind=pd.MultiIndex.from_product([[save_name], subindex])
processed_data=pd.DataFrame(np.vstack((allspec1d,allerr1d)),columns=cols, index=ind)
processed_data['Date']=np.append(alldate,alldate)
processed_data['sh']=np.append(sh,sh)
processed_data['Transit']=transit
processed_data['Scan Direction'] = np.append(dir_save, dir_save)
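# processed_data now holds the extracted spectra and their uncertainties
# stacked vertically: rows are indexed by (save_name, 'Value') for the spectra
# and (save_name, 'Error') for the errors, with one 'Pixel ###' column per
# wavelength pixel plus the Date, sh (shift), Transit, and Scan Direction
# metadata columns repeated for both halves.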
sys_p=pd.DataFrame(np.vstack((props_hold, errs)).T, columns=['Properties'
, 'Errors'])
sys_p['Visit']=save_name
sys_p=sys_p.set_index('Visit')
try:
cur=pd.read_csv('./processed_data.csv', index_col=[0,1])
cur=cur.drop(save_name, level=0, errors='ignore')
cur=pd.concat((cur,processed_data), sort=False)
cur.to_csv('./processed_data.csv', index_label=['Obs', 'Type'])
except IOError:
processed_data.to_csv('./processed_data.csv', index_label=['Obs','Type'])
try:
curr=pd.read_csv('./system_params.csv', index_col=0)
curr=curr.drop(save_name, errors='ignore')
curr=pd.concat((curr,sys_p), sort=False)
curr.to_csv('./system_params.csv')
except IOError:
sys_p.to_csv('./system_params.csv', index_label='Obs')
return [results, user_inputs]
if __name__=='__main__':
#if len(sys.argv) < 4:
# sys.exit('Format: preprocess_whitelight.py [planet] [visit] [direction]')
#visit=sys.argv[1]+'/'+sys.argv[2]
#direction=sys.argv[3]
#transit=True
#if len(sys.argv)==5:
# transit=bool(int(sys.argv[4]))
config = configparser.ConfigParser()
config.read('config.py')
planet = config.get('DATA', 'planet')
visit_number = config.get('DATA', 'visit_number')
visit = planet + '/' + visit_number
direction = config.get('DATA', 'scan_direction')
transit = config.getboolean('DATA', 'transit')
check = config.getboolean('DATA', 'check')
inp_file = config.getboolean('DATA', 'inp_file')
data_plots = config.getboolean('DATA', 'data_plots')
mcmc = config.getboolean('MODEL', 'mcmc')
openar = config.getboolean('MODEL', 'openar')
openinc = config.getboolean('MODEL', 'openinc')
fixtime = config.getboolean('MODEL', 'fixtime')
norandomt = config.getboolean('MODEL', 'norandomt')
fit_plots = config.getboolean('MODEL', 'fit_plots')
save_mcmc = config.getboolean('SAVE', 'save_mcmc')
save_model_info = config.getboolean('SAVE', 'save_model_info')
save_processed_data = config.getboolean('SAVE', 'save_processed_data')
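# The config.py read above is parsed with configparser, so it is expected to be
# INI-style despite the .py extension. A hypothetical example using the keys
# referenced here (all values are placeholders, not project defaults):
#
# [DATA]
# planet = planet_name
# visit_number = visit01
# scan_direction = forward
# transit = True
# check = True
# inp_file = False
# data_plots = True
#
# [MODEL]
# mcmc = False
# openar = True
# openinc = False
# fixtime = False
# norandomt = True
# fit_plots = True
#
# [SAVE]
# save_mcmc = False
# save_model_info = False
# save_processed_data = True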
#assert(check != inp_file)
print(visit)
best_results, inputs= preprocess_whitelight(visit
, direction
, transit=transit
, check=check
, inp_file=inp_file
, data_plots=data_plots
, save_processed_data=save_processed_data
, save_model_info=save_model_info
, fixtime=fixtime
, norandomt=norandomt
, openar=openar
, openinc=openinc
, fit_plots=fit_plots
, mcmc=mcmc
, save_mcmc=save_mcmc)
print(best_results)
print("Marg Depth: %f +/- %f" % (best_results[0]*1e6, best_results[1]*1e6))
print("Marg Central Event Time: %f +/- %f" % (best_results[2], best_results[3]))
print("Marg Inclination: %f +/- %f" % (best_results[4], best_results[5]))
print("Marg a/R*: %f +/- %f" % (best_results[6], best_results[7]))
print("Marg limb darkening params: ", best_results[8], "+/-", best_results[9])
inp = pd.DataFrame(inputs, columns=['User Inputs'])
###############
#
# Translated from R to Python. Copyright (c) 2019 <NAME>. Released under the MIT license.
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
fish_num_climate_2 = pandas.read_csv('4-1-1-fish-num-2.csv')
print(fish_num_climate_2.head())
print(fish_num_climate_2.describe())
sns.scatterplot(
x='temperature',
y='fish_num',
hue='weather',
data=fish_num_climate_2
)
plt.show()
fish_num_climate_2_d = pandas.get_dummies(fish_num_climate_2, columns=["weather", "id"])
print(fish_num_climate_2_d.head())
fish_num = fish_num_climate_2_d['fish_num']
sample_num = len(fish_num)
sunny = fish_num_climate_2_d['weather_sunny']
temperature = fish_num_climate_2_d['temperature']
stan_data = {
'N': sample_num,
'fish_num': fish_num,
'sunny': sunny,
'temp': temperature
}
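# The Stan file itself is not shown here. A minimal Poisson regression
# consistent with the stan_data dictionary above (the data names N, fish_num,
# sunny, and temp come from the code; the parameter names are assumptions)
# might look like:
#
# data {
#   int N;
#   int fish_num[N];
#   vector[N] sunny;
#   vector[N] temp;
# }
# parameters {
#   real Intercept;
#   real b_sunny;
#   real b_temp;
# }
# model {
#   fish_num ~ poisson_log(Intercept + b_sunny * sunny + b_temp * temp);
# }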
if os.path.exists('4-1-1-poisson.pkl'):
sm = pickle.load(open('4-1-1-poisson.pkl', 'rb'))
else:
# Compile the Poisson regression model defined in the Stan file.
sm = pystan.StanModel(file='4-1-1-poisson.stan')
mcmc_result = sm.sampling(
data=stan_data,
seed=1,
chains=4,
iter=2000,
warmup=1000,
thin=1
)
print(mcmc_result)
mcmc_result.plot()
plt.show()
mcmc_sample = mcmc_result.extract()
df = pandas.DataFrame(mcmc_sample)
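# Each entry of mcmc_sample (and each column of df) is an array of posterior
# draws. A quick summary, assuming the parameter names used in the sketch
# above (Intercept, b_sunny, b_temp):
# print(df[['Intercept', 'b_sunny', 'b_temp']].quantile([0.025, 0.5, 0.975]))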
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 16:47:25 2018
@author: julius
"""
import flask
from flask import request
import tensorflow as tf
import pandas as pd
import numpy as np
from tensorflow.python.data import Dataset
import math
import argparse
import json
import load_model
def my_input_fn(features):
"""Normalize and pass features to linear or nn classifier for prediction.
Args:
features: A pandas DataFrame of features
Returns:
Normalized features of input.
"""
# 53 is the max year_range in training set.
max_year_range = 53
normalized_features = pd.DataFrame()
for feature in ['pc', 'cn', 'hi', 'gi']:
normalized_features[feature] = features[feature].apply(
lambda val: math.log(val + 1.0))
normalized_features['year_range'] = features['year_range'].apply(
lambda val: val / max_year_range)
features = {key: np.array(value) for key, value in dict(normalized_features).items()}
ds = Dataset.from_tensor_slices(features)
ds = ds.batch(1).repeat(1)
features = ds.make_one_shot_iterator().get_next()
return features
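# Example of the expected input, assuming the caller builds a DataFrame with
# the five raw feature columns (the values here are made up):
# sample = pd.DataFrame({'pc': [12], 'cn': [340], 'hi': [9], 'gi': [15],
#                        'year_range': [6]})
# feature_tensors = my_input_fn(sample)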
def parse(classifier, features):
"""Predict student identity for an author.
Args:
classifier: a trained linear or nn classifier object for predictions.
features: pandas DataFrame of features.
Returns:
pred_class_id: A 'list' of predicted label as 'int'.
probabilities: A 'list' of prediction probabilities as 'float32'.
"""
prediction_input_fn = lambda: my_input_fn(features)
predictions = list(classifier.predict(input_fn = prediction_input_fn))
pred_class_id = [int(item['class_ids'][0]) for item in predictions]
probabilities = [item['probabilities'][item['class_ids'][0]] for item in predictions]
return pred_class_id, probabilities
def launch_api(classifier_name, host, port):
'''Launch the api for predictions with a certain classifier.
Args:
classifier: A trained linear or nn classifier object for predictions.
host: A 'str', the host url of api.
port: An 'int', the port of the host api used.
'''
# Choose which classifier to use.
if classifier_name == 'dnn_classifier':
classifier = load_model.load_DNNClassifier()
elif classifier_name == 'linear_classifier':
classifier = load_model.load_LinearClassifier()
else:
return print('No model matched. Choose one between \'dnn_classifier\' and \'linear_classifier\'.')
# Launch api.
app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route('/', methods = ['GET'])
def home():
return '''
<h1>Student Identity Judgement</h1>
<p>An API for judging student identity of authors.</p>
<p>POST to '/judge' with the following parameters in JSON format:
<br> <b>pc</b>: total number of publications
<br> <b>cn</b>: total number of citations
<br> <b>hi</b>: h-index
<br> <b>gi</b>: g-index
<br> <b>year_range</b>: time range from the first to the last publication
<br> <b>id</b> <i>(optional)</i>: id of authors</p>
'''
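# An illustrative request against the /judge endpoint once the API is running
# (the host, port, and feature values are placeholders):
# curl -X POST http://localhost:5000/judge \
#      -H "Content-Type: application/json" \
#      -d '[{"id": 1, "pc": 12, "cn": 340, "hi": 9, "gi": 15, "year_range": 6}]'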
@app.errorhandler(404)
def page_not_found(e):
return '<h1>404</h1><p>The resource could not be found.</p>', 404
@app.route('/judge', methods = ['POST'])
def judge():
request_data = request.get_json()
try:
features = pd.read_json(json.dumps(request_data))
except:
return '''
<h1>Incomplete Query Parameters</h1>
<p>Five parameters are needed for judging student identity,
at least one is missing.</p>
'''
labels, probabilities = parse(classifier, features)
# Integrate different DataFrames into one called results_df.
results_df = pd.DataFrame()
import pandas as pd
import csv, os, time
from omsql.omsq import *
import omsql.omsqlite3 as sq3
import telepot
from telepot.loop import MessageLoop
from pprint import pprint
def sqllite3():
svpt = os.getcwd() + '\\VIP.csv'
df = pd.read_csv(svpt)