prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
# coding=utf-8
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np # NumPy is a widely used toolkit for scientific computing
print("version: ", pd.__version__)
# ### Display settings
# By default, if a DataFrame has too many rows or columns, print only shows part of its content
pd.set_option('display.max_rows', None) # maximum number of rows to display; None shows all rows
pd.set_option('display.max_columns', None) # maximum number of columns to display; None shows all columns
pd.set_option('display.width', 200) # display width (in characters)
pd.set_option('max_colwidth', 100) # column width; default is 50
| pd.set_option('expand_frame_repr', False) | pandas.set_option |
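# --- Editor's note: illustrative usage sketch, not part of the original dataset row.
# With the masked completion applied, the display-settings preamble above prints wide
# DataFrames on a single line instead of folding them. Assumes only standard pandas.
import numpy as np
import pandas as pd

pd.set_option('display.max_rows', None)      # show all rows
pd.set_option('display.max_columns', None)   # show all columns
pd.set_option('display.width', 200)          # console width in characters
pd.set_option('expand_frame_repr', False)    # the masked call: do not wrap wide frames
print(pd.DataFrame(np.arange(40).reshape(2, 20)))   # rendered as one wide block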
import datetime
import enum
import typing
from dataclasses import dataclass
import pandas as pd
from dataclasses_json import dataclass_json
from typing_extensions import Annotated
from flytekit import kwtypes, task, workflow
from flytekit.types.file import FlyteFile
from flytekit.types.structured.structured_dataset import StructuredDataset
superset_cols = kwtypes(name=str, age=int)
subset_cols = kwtypes(age=int)
@task
def get_subset_df(df: Annotated[pd.DataFrame, superset_cols]) -> Annotated[StructuredDataset, subset_cols]:
df = pd.concat([df, pd.DataFrame([[30]], columns=["age"])])
return StructuredDataset(dataframe=df)
@task
def show_sd(in_sd: StructuredDataset):
| pd.set_option("expand_frame_repr", False) | pandas.set_option |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" @brief Precision and Recall
@ref https://en.wikipedia.org/wiki/Precision_and_recall
Modified from https://github.com/lyst/lightfm and https://github.com/jfkirk/tensorrec
@author <<NAME>> <EMAIL> """
import numpy as np
import pandas as pd
from scipy import sparse
__all__ = ["precision_at_k", "recall_at_k", "rank_matrix"]
def rank_matrix(mat):
assert isinstance(mat, (np.ndarray,))
mat_ = | pd.DataFrame(data=mat) | pandas.DataFrame |
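# --- Editor's note: rank_matrix above is truncated at the masked pd.DataFrame call.
# A hedged sketch of a typical continuation in precision/recall-at-k code (an
# assumption, not the original body): rank the scores row-wise so the highest score
# in each row gets rank 1.
def rank_matrix_sketch(mat):
    assert isinstance(mat, np.ndarray)
    mat_ = pd.DataFrame(data=mat)
    ranks = mat_.rank(axis=1, ascending=False, method='average')  # ties share the mean rank
    return ranks.values

# rank_matrix_sketch(np.array([[0.1, 0.9, 0.5]]))  ->  array([[3., 1., 2.]])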
from ThesisAnalysis import get_data, ThesisHDF5Writer
from ThesisAnalysis.files import MCLab_Opct40_5MHz, Lab_TFPoly, CHECM
import numpy as np
import pandas as pd
from CHECLabPy.core.io import DL1Reader
from CHECLabPy.spectrum_fitters.gentile import GentileFitter
from CHECLabPy.spectrum_fitters.mapm import MAPMFitter
import warnings
from pandas.errors import PerformanceWarning
def get_dict(type, input_paths, config_path, roi, poi, fitter):
readers = [DL1Reader(path) for path in input_paths]
n_illuminations = len(readers)
fitter = fitter(n_illuminations, config_path)
charges = []
for reader in readers:
pixel, charge = reader.select_columns(['pixel', 'charge'])
if poi != -1:
charge_p = charge[pixel == poi]
else:
charge_p = charge
charges.append(charge_p)
fitter.apply(*charges)
eped = fitter.coeff['eped']
spe = fitter.coeff['spe']
charges_norm = [(c - eped) / spe for c in charges]
fitter.range = [-1, 5]
fitter.initial['eped_sigma'] = 0.5
fitter.initial['spe'] = 1
fitter.initial['spe_sigma'] = 0.1
fitter.limits['limit_eped_sigma'] = [0.001, 1]
fitter.limits['limit_spe'] = [0.001, 2]
fitter.limits['limit_spe_sigma'] = [0.001, 1]
fitter.apply(*charges_norm)
fitx = np.linspace(fitter.range[0], fitter.range[1], 1000)
coeff = fitter.coeff.copy()
errors0 = fitter.errors.copy()
errors = dict()
for key, value in errors0.items():
errors['error_' + key] = value
spe = coeff['spe']
print(spe)
return dict(
type=type,
edges=fitter.edges,
between=fitter.between,
fitx=fitx,
hist=fitter.hist[roi],
fit=fitter.fit_function(fitx, **coeff)[roi],
roi=roi,
**coeff,
**errors
)
def process(comparison_list, output_path):
d_list = list()
for d in comparison_list:
name = d['name']
file = d['file']
roi = d['roi']
fitter = d['fitter']
input_paths = file.spe_files
config_path = file.spe_config_path
poi = file.poi
d_list.append(get_dict(name, input_paths, config_path, roi, poi, fitter))
df = | pd.DataFrame(d_list) | pandas.DataFrame |
#!/usr/bin/env python -W ignore::DeprecationWarning
import os
import ast
import pathlib
import pandas as pd
import numpy as np
import random
import itertools
from tqdm import tqdm
from skimage import measure
from scipy import stats
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
logging.captureWarnings(True)
import inputfuncs
import ccprocess
import libcc
import segmfuncs
import parcelfuncs
import webbrowser
from threading import Timer
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_daq as daq
from dash.dependencies import Input, Output, State, ALL, MATCH
from dash.exceptions import PreventUpdate
from dash_extensions import Download
from dash_extensions.snippets import send_data_frame
import plotly.io as pio
import plotly.figure_factory as ff
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
class Error(Exception):
pass
theme = 'plotly'
print(' ')
# GENERAL DEFINITIONS -------------------------------------------------------------------------
dict_segmentation_functions = {'ROQS': segmfuncs.segm_roqs,
'Watershed': segmfuncs.segm_watershed,
'Imported Masks': segmfuncs.segm_mask}
dict_parcellation_functions = {'Witelson': parcelfuncs.parc_witelson,
'Hofer': parcelfuncs.parc_hofer,
'Chao': parcelfuncs.parc_chao,
'Cover': parcelfuncs.parc_cover,
'Freesurfer': parcelfuncs.parc_freesurfer}
dict_3d_segmentation_functions = {'Watershed3d': segmfuncs.segm_watershed_3d}
scalar_list = ['FA', 'MD', 'RD', 'AD']
colors_list = px.colors.qualitative.Plotly
dict_parcellation_methods = {'Witelson': 'witelson', 'Hofer & Frahm': 'hofer', 'Chao et al':'chao', 'Cover et al': 'cover', 'Freesurfer':'freesurfer'}
dict_segmentation_methods = {'ROQS': 'roqs', 'Watershed': 'watershed'}
dict_3d_segmentation_methods = {'Watershed3d':'watershed3d'}
# DATA IMPORTING -----------------------------------------------------------------------------
# Arg parser
opts = inputfuncs.get_parser().parse_args()
if opts.staple is True:
dict_segmentation_functions['STAPLE'] = segmfuncs.segm_staple
dict_segmentation_methods['STAPLE'] = 'staple'
df_categories = pd.DataFrame()
df_numerical = pd.DataFrame()
# Read external data
if opts.ext_data is not None:
external_data_path = opts.ext_data
external_data = pd.read_excel(external_data_path, dtype={'Subjects':'object'})
external_data = external_data.set_index('Subjects')
# Clear NaNs on index
external_data = external_data[external_data.index.notnull()]
# Remove unnamed columns
external_data = external_data.loc[:,~external_data.columns.str.match("Unnamed")]
df_categories = external_data.select_dtypes(include=['object'])
df_numerical = external_data.select_dtypes(include=['number'])
col_categories = ['Method'] + list(df_categories.columns)
# Get indicated directories
path_dict = {}
if opts.folders is not None:
for directory in opts.folders:
if directory is not None:
if inputfuncs.check_directory(directory, opts.basename):
path_dict[os.path.basename(directory)] = os.path.join(directory, '')
# Import the subjects inside the parents folders
group_dict = {}
if opts.parents is not None:
for parent in opts.parents:
if parent is not None:
directory_dict, dict_folders = inputfuncs.import_parent(parent, opts.basename)
path_dict.update(directory_dict)
# Create dict with subjects as keys and group (parents names) as values
group_dict.update(dict_folders)
df_group = pd.DataFrame.from_dict(group_dict, orient='index', columns=["Folder"])
df_categories = pd.concat([df_categories, df_group], axis = 1)
# Warning for no imported subjects
if len(path_dict.values()) == 0:
print('Error: No subjects were imported')
print('Terminating program.\n')
raise SystemExit(0)
# Check if we are importing masks
if opts.maskname is not None:
mask_basename = opts.maskname
dict_segmentation_methods['Imported Masks'] ='imported_mask'
else:
mask_basename = None
# DATA PROCESSING -----------------------------------------------------------------------------
# Create dataframe for each segmentation method
scalar_statistics_names = ['FA','FA StdDev','MD','MD StdDev','RD','RD StdDev','AD','AD StdDev']
scalar_midline_names = list(range(0,200))
loaded_subjects = []
dict_segmentation_masks = {}
dict_scalar_maps = {}
dict_scalar_statistics = {}
dict_scalar_midlines = {}
dict_error_prob = {}
dict_parcellations_masks = {}
dict_parcellations_statistics = {}
dict_thickness = {}
dict_removed_subjects = {}
dict_scalar_outliers = {}
# Segment and get info
for subject_path in tqdm(path_dict.values()):
for segmentation_method in dict_segmentation_methods.keys():
if segmentation_method not in dict_scalar_statistics.keys():
dict_segmentation_masks[segmentation_method] = {}
dict_scalar_maps[segmentation_method] = {}
dict_scalar_statistics[segmentation_method] = {}
dict_scalar_midlines[segmentation_method] = {}
dict_error_prob[segmentation_method] = {}
dict_parcellations_masks[segmentation_method] = {}
dict_thickness[segmentation_method] = {}
dict_removed_subjects[segmentation_method] = []
dict_scalar_outliers[segmentation_method] = []
# Get data path info
folderpath = subject_path + 'inCCsight/'
filename = 'segm_' + dict_segmentation_methods[segmentation_method] + '_data.npy'
subject_name = os.path.basename(os.path.dirname(subject_path))
# Process/Load data
if segmentation_method == 'Imported Masks':
if ccprocess.check_mask(subject_path, mask_basename) is False:
continue
data_tuple = ccprocess.segment(subject_path,
segmentation_method,
dict_segmentation_functions,
dict_parcellation_functions,
opts.basename,
mask_basename)
if data_tuple is None:
continue
#except:
# print('> Warning: Segmentation failed for subject {} with method {}'.format(subject_name, segmentation_method))
# continue
segmentation_mask, scalar_maps, scalar_statistics, scalar_midlines, error_prob, parcellations_masks = data_tuple
# Get thickness
try:
thick, _, _ = libcc.thickness(segmentation_mask, 200)
except:
thick = np.empty(200)
# Assemble dictionaries
dict_segmentation_masks[segmentation_method][subject_name] = segmentation_mask
dict_scalar_maps[segmentation_method][subject_name] = scalar_maps
dict_scalar_statistics[segmentation_method][subject_name] = scalar_statistics
dict_scalar_midlines[segmentation_method][subject_name] = scalar_midlines
dict_error_prob[segmentation_method][subject_name] = error_prob
dict_parcellations_masks[segmentation_method][subject_name] = parcellations_masks
dict_thickness[segmentation_method][subject_name] = thick
# Save array with subject keys
loaded_subjects.append(subject_name)
loaded_subjects = list(set(loaded_subjects))
loaded_subjects.sort()
for segmentation_method in dict_segmentation_methods.keys():
# Convert to pandas dataframe
dict_scalar_statistics[segmentation_method] = pd.DataFrame.from_dict(dict_scalar_statistics[segmentation_method],
orient='index',
columns=scalar_statistics_names)
dict_scalar_midlines[segmentation_method] = pd.DataFrame.from_dict(dict_scalar_midlines[segmentation_method],
orient='index')
dict_thickness[segmentation_method] = pd.DataFrame.from_dict(dict_thickness[segmentation_method],
orient='index')
dict_error_prob[segmentation_method] = pd.DataFrame.from_dict(dict_error_prob[segmentation_method], columns=['error_prob'], orient='index')
dict_parcellations_statistics[segmentation_method] = inputfuncs.parcellations_dfs_dicts(dict_scalar_maps[segmentation_method], dict_parcellations_masks[segmentation_method], segmentation_method)
# Get FA and/or other scalar outliers
for scalar in ['FA']:
df = dict_scalar_statistics[segmentation_method][scalar]
outliers = df[~df.between(df.quantile(.1), df.quantile(.9))].index
dict_scalar_outliers[segmentation_method] += list(outliers)
# VISUALIZATION -------------------------------------------------------------------------------
app = dash.Dash(__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=.8, maximum-scale=.8"}],
external_stylesheets = [dbc.themes.BOOTSTRAP],
prevent_initial_callbacks=True)
server = app.server
app.config["suppress_callback_exceptions"] = True
app.title = 'inCCsight'
# ------------------------------- BUILD FUNCS -----------------------------------------------
def build_banner():
return html.Div(
id="banner",
className="twelve columns",
children=[
html.Div(
className='twelve columns',
style=dict(display='flex', justifyContent='flex-start'),
children=[
html.Img(src=app.get_asset_url("unicampw.png"), style=dict(height='9rem', marginBottom='2rem', padding='1rem')),
html.Img(src=app.get_asset_url("miclab.png"), style=dict(height='9rem', marginBottom='2rem', padding='1rem')),
]),
html.Div(
className='twelve columns',
style=dict(display='flex', justifyContent='center'),
children=[
html.Img(src=app.get_asset_url("inccsight.png"), style=dict(height='25rem')),
]),
],
)
def build_graph_title(title):
return html.P(className="graph-title", children=title)
# DataViz ------------------------------------------------------------------------------------
def build_group_segm_boxplot(mode='Method', segmentation_method='ROQS', extra_dims=list(df_numerical.columns)):
std_colors = pio.templates[theme]['layout']['colorway']
if mode == 'Method':
scalar_names = ['FA', 'MD', 'RD', 'AD'] + extra_dims
subplots = make_subplots(rows=1, cols=len(scalar_names), subplot_titles=scalar_names)
for i, scalar in enumerate(scalar_names):
for j, segmentation_method in enumerate(dict_segmentation_methods.keys()):
if len(extra_dims) == 0:
df = pd.DataFrame()
else:
df = df_numerical[extra_dims]
df = pd.concat([df, dict_scalar_statistics[segmentation_method]], axis=1)
df = df.drop(dict_removed_subjects[segmentation_method])
subplots.add_trace(go.Box(y=df[scalar], name=segmentation_method, legendgroup=segmentation_method, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1)
if i == 0:
subplots.data[-1].update(name=segmentation_method, legendgroup=segmentation_method)
else:
subplots.data[-1].update(showlegend=False)
else:
scalar_names = ['FA', 'MD', 'RD', 'AD'] + extra_dims
subplots = make_subplots(rows=1, cols=len(scalar_names), subplot_titles=scalar_names)
if len(extra_dims) == 0:
df = pd.DataFrame()
else:
df = df_numerical[extra_dims]
df = pd.concat([df, df_categories[mode], dict_scalar_statistics[segmentation_method]], axis=1)
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
categories = set(df[mode])
for i, scalar in enumerate(scalar_names):
for j, category in enumerate(categories):
subplots.add_trace(go.Box(y=df[df[mode] == category][scalar], name=category, legendgroup=category, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1)
if i == 0:
subplots.data[-1].update(name=category, legendgroup=category)
else:
subplots.data[-1].update(showlegend=False)
subplots.update_layout(height=400, paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
subplots.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0, t=60))
return subplots
def build_parcel_boxplot(scalar='FA', mode='Method', segmentation_method='ROQS', parcellation_method='Witelson'):
std_colors = pio.templates[theme]['layout']['colorway']
list_regions = ['P1', 'P2', 'P3', 'P4', 'P5']
subplots = make_subplots(rows=1, cols=5, subplot_titles=list_regions)
if mode == 'Method':
for i, region in enumerate(list_regions):
for j, segmentation_method in enumerate(dict_segmentation_methods.keys()):
df = dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar]
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
subplots.add_trace(go.Box(y=df, name=segmentation_method, legendgroup=segmentation_method, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1)
if i == 0:
subplots.data[-1].update(name=segmentation_method, legendgroup=segmentation_method)
else:
subplots.data[-1].update(showlegend=False)
else:
categories = list(set(df_categories[mode]))
df = pd.DataFrame()
for category in categories:
df_aux = pd.DataFrame()
for region in list_regions:
df_aux = pd.concat([df_aux, dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar][df_categories[mode] == category]], axis=1)
df_aux[mode] = category
df = pd.concat([df, df_aux], axis=0)
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
names = list_regions + [mode]
df.columns = names
categories = set(df[mode])
for i, region in enumerate(list_regions):
for j, category in enumerate(categories):
subplots.add_trace(go.Box(y=df[df[mode] == category][region], name=category, legendgroup=category, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1)
if i == 0:
subplots.data[-1].update(name=category, legendgroup=category)
else:
subplots.data[-1].update(showlegend=False)
subplots.update_layout(height=400, paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
subplots.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0, t=60))
return subplots
def build_segm_scatterplot(mode='Method', segmentation_method = 'ROQS', scalar_x = 'FA', scalar_y = 'MD', trendline=None):
df = pd.DataFrame()
if mode == 'Method':
for segmentation_method in dict_segmentation_methods.keys():
df_aux = dict_scalar_statistics[segmentation_method]
df_aux['Method'] = segmentation_method
df = pd.concat([df, df_aux], axis=0)
else:
df = pd.concat([df_categories[mode], dict_scalar_statistics[segmentation_method]], axis=1)
df = df.join(df_numerical, how='outer')
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
print(df)
fig = px.scatter(df,
x=scalar_x,
y=scalar_y,
color=mode,
marginal_y="violin",
marginal_x="histogram",
hover_name=df.index,
trendline=trendline)
fig.update_layout(height=800, paper_bgcolor='rgba(0,0,0,0)')
fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor='right', x=1))
fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
return fig
def build_segm_scattermatrix(mode='Method', segmentation_method = 'ROQS', extra_dims = list(df_numerical.columns)):
dimensions = ['FA','MD','RD','AD']
df = pd.DataFrame()
if mode == 'Method':
for segmentation_method in dict_segmentation_methods.keys():
df_aux = dict_scalar_statistics[segmentation_method]
df_aux['Method'] = segmentation_method
df = pd.concat([df, df_aux], axis=0)
else:
df = pd.concat([df_categories[mode], dict_scalar_statistics[segmentation_method]], axis=1)
if len(extra_dims) > 0:
dimensions = ['FA','MD','RD','AD'] + extra_dims
df = pd.concat([df, df_numerical], axis=1)
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
fig = px.scatter_matrix(df,
dimensions=dimensions,
color=mode,
hover_name=df.index)
if mode == 'Method':
n_cats = len(dict_segmentation_methods.keys())
else:
n_cats = len(set(df_categories[mode].dropna(axis=0)))
fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor='right', x=1))
fig.update_layout(height=500+250*n_cats, paper_bgcolor='rgba(0,0,0,0)')
fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0))
return fig
def build_midline_plot(scalar='FA', mode='Method', segmentation_method='ROQS'):
df = pd.DataFrame()
if mode == 'Method':
for segmentation_method in dict_segmentation_methods.keys():
if scalar in ['FA', 'MD', 'AD', 'RD']:
df_aux = pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names)
elif scalar == 'Thickness':
df_aux = dict_thickness[segmentation_method]
df_aux['Method'] = segmentation_method
df = pd.concat([df, df_aux], axis=0)
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
df_grouped = df.groupby('Method').mean().transpose()
df_melt = pd.melt(df_grouped.reset_index(), id_vars='index', value_vars=set(df[mode]))
else:
if scalar in ['FA', 'MD', 'AD', 'RD']:
df_aux = pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names)
elif scalar == 'Thickness':
df_aux = dict_thickness[segmentation_method]
df = pd.concat([df_categories[mode], df_aux], axis=1)
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
df_grouped = df.groupby(mode).mean().transpose()
df_melt = pd.melt(df_grouped.reset_index(), id_vars='index', value_vars=set(df[mode]))
fig = px.line(df_melt, x='index', y='value', color=mode)
fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor='right', x=1))
fig.update_layout(height=400, paper_bgcolor='rgba(0,0,0,0)')
fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
fig.update_layout(xaxis_title='Points along CC body', yaxis_title=scalar, legend_title=mode)
return fig
def build_bubble_grouped(mode='Method', segmentation_method='ROQS', scalar='Thickness', size=True):
def build_bubble_plot(scalar='FA', segmentation_method='Watershed', size = True, category_index = None):
df_pts = pd.read_pickle('./assets/bubble_plot_xy.pkl')
segm_contour = np.load('./assets/bubble_plot_contour.npy')
if scalar == 'Thickness':
df_aux = dict_thickness[segmentation_method][list(np.linspace(0,195,40))+[199]]
else:
df_aux = pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names)[list(np.linspace(0,195,40))+[199]]
df_aux = df_aux.drop(dict_removed_subjects[segmentation_method])
if category_index is not None:
df_aux = df_aux.loc[np.intersect1d(df_aux.index, category_index)]
df_aux = df_aux.dropna(axis=0)
df_pts[scalar] = df_aux.mean().reset_index()[0]
if size == True:
fig = px.scatter(df_pts, x="x", y="y", color = scalar, size = df_pts[scalar])
fig.update_traces(marker=dict(sizeref = 2. * max(df_pts[scalar]) / (45 ** 2)))
else:
fig = px.scatter(df_pts, x="x", y="y", color=scalar)
fig.update_traces(marker=dict(size=25))
fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
fig.add_trace(go.Contour(z=segm_contour, contours=dict(start=0, end=70, size=70, coloring='none'),
showscale=False, line_width=3, line=dict(color='rgba(0,0,0,.5)', dash='dot')))
return fig
if mode == 'Method':
n_cats = len(dict_segmentation_methods.keys())
fig = make_subplots(rows=n_cats, cols=1, vertical_spacing=0.1/n_cats)
for i, segmentation_method in enumerate(dict_segmentation_methods):
fig.add_trace(build_bubble_plot(scalar=scalar, segmentation_method=segmentation_method, size=size)['data'][0], row=i+1, col=1)
fig.update_yaxes(title_text="{} for<br>{} method".format(scalar, segmentation_method), row=i+1, col=1)
else:
df = df_categories[mode]
df = df.drop(dict_removed_subjects[segmentation_method])
df = df.dropna(axis=0)
n_cats = len(set(df))
fig = make_subplots(rows=n_cats, cols=1, vertical_spacing=0.1/n_cats)
for i, category in enumerate(set(df)):
category_index = df_categories.loc[df_categories[mode] == category].index
fig.add_trace(build_bubble_plot(scalar=scalar, segmentation_method=segmentation_method, size=size, category_index=category_index)['data'][0], row=i+1, col=1)
fig.update_yaxes(title_text="{} for<br>{} category".format(scalar, category), row=i+1, col=1)
fig.update_xaxes(title_text="Points along CC body", row=n_cats, col=1)
fig.update_layout(height=250*n_cats, paper_bgcolor='rgba(0,0,0,0)')
fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0, t=60))
return fig
def build_bubble_grouped_pvalue(mode='Method', segmentation_method='ROQS', scalar='Thickness', threshold=0.05):
def build_bubble_pvalue(pvalue, threshold=0.05, size=False, gray=False):
df_pts = pd.read_pickle('./assets/bubble_plot_xy.pkl')
segm_contour = np.load('./assets/bubble_plot_contour.npy')
marker_color = 'rgba(100,100,100,0.5)'
df_pts['p-value'] = pvalue
if gray:
fig = px.scatter(df_pts, x="x", y="y", hover_data=['p-value'])
fig.update_traces(marker=(dict(color=marker_color)))
else:
fig = px.scatter(df_pts.loc[df_pts['p-value'] < threshold], x="x", y="y", color = 'p-value')
fig.update_traces(marker=dict(size=25))
fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
return fig
if mode == 'Method':
categories = list(itertools.combinations(dict_segmentation_methods.keys(), 2))
n_cats = len(categories)
if n_cats == 0:
return empty_figure_with_text('Not enough categories to calculate p-values.')
fig = make_subplots(rows=n_cats, cols=1)
for i, category in enumerate(categories):
if scalar == 'Thickness':
pvalue = stats.ttest_ind(dict_thickness[category[0]], dict_thickness[category[1]]).pvalue
else:
pvalue = stats.ttest_ind(pd.DataFrame.from_dict(dict(dict_scalar_midlines[category[0]][scalar]), orient='index', columns=scalar_midline_names),
pd.DataFrame.from_dict(dict(dict_scalar_midlines[category[1]][scalar]), orient='index', columns=scalar_midline_names)).pvalue
pvalue = np.take(pvalue, list(np.linspace(0,195,40)))
new_gray_fig = build_bubble_pvalue(pvalue, gray=True)
new_fig = build_bubble_pvalue(pvalue, threshold)['data']
if len(new_fig) > 0:
fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
fig.add_trace(new_fig[0], row=i+1, col=1)
else:
fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
fig.update_yaxes(title_text="{} x {}".format(category[0], category[1]), row=i+1, col=1)
else:
df = df_categories[mode]
df = df.dropna(axis=0)
categories = list(itertools.combinations(set(df), 2))
n_cats = len(categories)
if n_cats == 0:
return empty_figure_with_text('Not enough categories to calculate p-values.')
fig = make_subplots(rows=n_cats, cols=1, x_title='Statistically Meaningful Differences (p < 0.05)')
for i, category in enumerate(categories):
if scalar == 'Thickness':
pvalue = stats.ttest_ind(dict_thickness[segmentation_method].loc[df_categories[mode] == category[0]],
dict_thickness[segmentation_method].loc[df_categories[mode] == category[1]]).pvalue
else:
pvalue = stats.ttest_ind(pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names).loc[df_categories[mode] == category[0]],
pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names).loc[df_categories[mode] == category[1]]).pvalue
pvalue = np.take(pvalue, list(np.linspace(0,195,40)))
new_gray_fig = build_bubble_pvalue(pvalue, gray=True)
new_fig = build_bubble_pvalue(pvalue, threshold)['data']
if len(new_fig) > 0:
fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
fig.add_trace(new_fig[0], row=i+1, col=1)
else:
fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
fig.update_yaxes(title_text="{} x {}".format(category[0], category[1]), row=i+1, col=1)
fig.update_layout(height=400*n_cats)
fig.update_xaxes(title_text="Points along CC body", row=n_cats, col=1)
fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
return fig
# Dropdowns --------------------------------------------------------------------------------
def build_midlineplot_dropdown():
options = [{'label': scalar, 'value': scalar} for scalar in scalar_list+['Thickness']]
layout = html.Div([
html.Div([
html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
dcc.Dropdown(id='dropdown-midline-scalars',
options=options,
multi=False,
value='FA',
style={'width':'120px'}),
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
]
)
return layout
def build_segm_scatterplot_dropdowns():
options = [{'label': scalar, 'value': scalar} for scalar in scalar_list + list(df_numerical.columns)]
options_trendlines = [{'label': scalar, 'value': scalar} for scalar in ['None', 'OLS', 'Lowess']]
layout = html.Div([
html.Div([
html.Div([
html.H6('Scalar Y:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
dcc.Dropdown(id='dropdown-scalars-left',
options=options,
multi=False,
value='FA',
style={'width':'90px'}),
], className='row', style={'margin':'0px 0px 0px 10px'}),
html.Div([
html.H6('Scalar X:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
dcc.Dropdown(id='dropdown-scalars-right',
options=options,
multi=False,
value='MD',
style={'width':'90px'}),
], className='row', style={'margin':'0px 0px 0px 30px'}),
html.Div([
html.H6('Trendline:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
dcc.Dropdown(id='dropdown-scalars-trendline',
options=options_trendlines,
multi=False,
value='None',
style={'width':'120px'}),
], className='row', style={'margin':'0px 0px 0px 30px'}),
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
]
)
return layout
def build_parcel_boxplot_dropdowns():
options_scalars = [{'label': scalar, 'value': scalar} for scalar in scalar_list]
options_parcel_method = [{'label': parc, 'value': parc} for parc in dict_parcellations_statistics[segmentation_method].keys()]
layout = html.Div([
html.Div([
html.H6('Parc. Method:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
dcc.Dropdown(id='dropdown-parcel-boxplot-left',
options=options_parcel_method,
multi=False,
value=list(dict_parcellation_methods.keys())[0],
style={'width':'150px'}),
html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
dcc.Dropdown(id='dropdown-parcel-scalars-right',
options=options_scalars,
multi=False,
value='FA',
style={'width':'120px'})
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
]
)
return layout
def build_bubbleplot_dropdowns():
options_pvalue = [{'label': scalar, 'value': scalar} for scalar in ['Scalar', 'p-value']]
options_scalars = [{'label': scalar, 'value': scalar} for scalar in scalar_list+['Thickness']]
options_size = [{'label': scalar, 'value': scalar} for scalar in ['True', 'False']]
layout = html.Div([
html.Div([
html.Div([
html.H6('Mode:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
dcc.Dropdown(id='dropdown-bubbleplot-mode',
options=options_pvalue,
multi=False,
value='Scalar',
style={'width':'150px'}),
], className='row', style={'margin':'0px 0px 0px 10px'}),
html.Div([
html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
dcc.Dropdown(id='dropdown-bubbleplot-left',
options=options_scalars,
multi=False,
value='Thickness',
style={'width':'150px'}),
], className='row', style={'margin':'0px 0px 0px 30px'}),
html.Div([
html.H6('Size:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
dcc.Dropdown(id='dropdown-bubbleplot-right',
options=options_size,
multi=False,
value='True',
style={'width':'120px'})
], className='row', style={'margin':'0px 0px 0px 30px'}, id='div-bubble-dropdown-right'),
html.Div([
html.H6('Threshold:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
dcc.Dropdown(id='dropdown-bubbleplot-threshold',
options=[{'label': num/100, 'value': num/100} for num in list(np.arange(0, 10, 1)) + list(np.arange(15, 95, 5))],
multi=False,
value=0.05,
style={'width':'120px'})
], className='row', style=dict(display='none', margin='0px 0px 0px 30px'), id='div-bubble-dropdown-threshold'),
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center', marginBottom='50px')),
]
)
return layout
def build_fissure_image_dropdown(subject_id, segmentation_methods_available=dict_segmentation_methods.keys()):
options = [{'label': segmentation_method, 'value': segmentation_method} for segmentation_method in list(segmentation_methods_available)+['None']]
options_scalars = [{'label': scalar, 'value': scalar} for scalar in ['wFA']+scalar_list]
layout = html.Div([
html.Div([
html.Div(
children=[
html.H6('Segm. Method:', className='table-options-title', style={'padding':'0px 20px 0px 10px'}),
dcc.Dropdown(id='dropdown-subj-collapse-segm-methods',
options=options,
multi=False,
value=segmentation_methods_available[0],
style={'width':'120px'}),
], className='row'),
html.Div(
dbc.Button('Remove segmentation',
outline=True,
color='danger',
id=dict(type='btn-remove-subject', index=subject_id),
style=dict(padding='0 15px', margin='0px 0px 0px 20px', fontSize='1.2rem')
),
id=f"tooltip-div-wrapper-{subject_id}"),
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center', marginLeft='0')),
html.Div(
children=[
html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
dcc.Dropdown(id='dropdown-subj-collapse-scalars',
options=options_scalars,
multi=False,
value='wFA',
style={'width':'150px'}),
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center', marginTop='2px')),
dbc.Tooltip("This will remove only the ROQS segmentation, indicated on the dropdown to the left",
target=f"tooltip-div-wrapper-{subject_id}",
id='subject-collapse-tooltip',
style=dict(fontSize='12pt'),
placement='bottom',
),
]
)
return layout
def build_individual_subject_segm_table_dropdown(segmentation_methods_available=dict_segmentation_methods.keys()):
options = [{'label': segmentation_method, 'value': segmentation_method} for segmentation_method in segmentation_methods_available]
options_stddev = [{'label': scalar, 'value': scalar} for scalar in ['Show', 'Hide']]
layout = html.Div([
html.Div([
html.H6('Segm. method:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
dcc.Dropdown(id='dropdown-subj-collapse-table-segm-methods',
options=options,
multi=False,
value=segmentation_methods_available[0],
style={'width':'150px'}),
html.H6('Show Std.Dev.:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
dcc.Dropdown(id='dropdown-subj-collapse-table-segm-std-dev',
options=options_stddev,
multi=False,
value='Hide',
style={'width':'120px'})
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
]
)
return layout
def build_individual_subject_parcel_table_dropdown(segmentation_methods_available=dict_segmentation_methods.keys()):
options = [{'label': segmentation_method, 'value': segmentation_method} for segmentation_method in segmentation_methods_available]
options_scalars = [{'label': scalar, 'value': scalar} for scalar in scalar_list]
options_parcel_method = [{'label': parc, 'value': parc} for parc in dict_parcellations_statistics[segmentation_method].keys()]
layout = html.Div([
html.Div([
html.H6('Segm. method:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
dcc.Dropdown(id='dropdown-subj-collapse-table-parcel-segm-methods',
options=options,
multi=False,
value=segmentation_methods_available[0],
style={'width':'150px'}),
html.H6('Parcel. method:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
dcc.Dropdown(id='dropdown-subj-collapse-table-parcel-methods',
options=options_parcel_method,
multi=False,
value=list(dict_parcellation_methods.keys())[0],
style={'width':'150px'}),
html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
dcc.Dropdown(id='dropdown-subj-collapse-table-parcel-scalars',
options=options_scalars,
multi=False,
value='FA',
style={'width':'120px'})
], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
]
)
return layout
# Collapses and others ---------------------------------------------------------------------
def build_subjects_list():
'''
button_group = dbc.ButtonGroup(
[dbc.Button(i, color='light', size='lg', style=dict(width='100%')) for i in loaded_subjects],
vertical=True,
style=dict(width='100%')
)
'''
button_group = []
for i, subject_id in enumerate(loaded_subjects):
if i%2 == 0:
background = 'rgba(50,50,70,.5)'
else:
background = 'rgba(60,60,70,.5)'
button_group.append(html.Button(subject_id,
style=dict(fontSize='1.8rem', width='100%', backgroundColor=background, marginBottom='2px', color='rgb(255,255,255)'),
id={'type': 'subject-list-btns', 'index': subject_id}))
return html.Div(button_group, style=dict(width='100%'))
def build_quality_collapse():
layout = html.Div([
html.Div([
build_graph_title("Quality evaluation"),
html.Button('X', style=dict(fontSize='1.5rem', margin='10px', padding='0 13px', fontFamily= 'Open Sans', borderRadius='20px'),
id='btn-exit-quality')
], className='twelve columns', style=dict(display='flex', justifyContent='space-between')),
html.Div([
html.Div([
html.H6("Threshold:", style=dict(fontSize='1.8rem')),
dcc.Dropdown(id='dropdown-quality-threshold',
options= [{'label': num/100, 'value': num/100} for num in np.arange(95, 5, -5)],
multi=False,
value='0.7',
style={'width':'100px', 'marginLeft':'5px'}),
html.H6("Scalar:", style=dict(fontSize='1.8rem', marginLeft='2rem')),
dcc.Dropdown(id='dropdown-quality-scalar',
options= [{'label': i, 'value': i} for i in ['wFA']+scalar_list],
multi=False,
value='wFA',
style={'width':'100px', 'marginLeft':'5px'}),
], className='row', style=dict(margin='20px 0 0 12px')),
], className='twelve columns', style=dict(display="flex", justifyContent="space-between")),
html.Div([
dbc.Button("Restore Removed", color='info', outline=True, size='lg', id='restore_btn', style=dict(marginRight='1rem')),
dbc.Button("Remove Selected", color='danger', outline=True, size='lg', id='remove_btn', style=dict(marginRight='2rem')),
], className='twelve columns', style=dict(display='flex', justifyContent='flex-end')),
html.Div([
dbc.Button("Unselect all", color='primary', outline=True, size='lg', id='unselect_all_btn', style=dict(marginRight='1rem')),
dbc.Button("Select all", color='primary', outline=True, size='lg', id='select_all_btn', style=dict(marginRight='2rem')),
], className='twelve columns', style=dict(display='flex', verticalAlign='center', justifyContent='flex-end', padding='10px 0px 10px 0px', marginBottom='-1rem')),
html.Div(children=build_quality_images(),
className="twelve columns",
id='photo-container',
style=dict(margin="0rem 0rem 2rem 0rem")),
], style={'margin':'20px', 'height':"100vh", 'backgroundColor':'#FAFAFA', 'border-radius':'20px', 'border':'1px solid rgba(0,0,0,.125)'})
return layout
def build_quality_images(threshold=0.7, scalar='wFA'):
def get_quality_tab_children(segmentation_method, scalar='wFA'):
children = []
# Get error probs
df = dict_error_prob[segmentation_method]
df = df.drop(dict_removed_subjects[segmentation_method])
# Order by error probs
index_error_probs = df.query('error_prob > '+str(threshold)).index.tolist()
index_error_probs.sort()
# Get outliers
index_outliers = dict_scalar_outliers[segmentation_method]
index_outliers = [x for x in index_outliers if x not in dict_removed_subjects[segmentation_method]]
index_outliers.sort()
index_no_quality = list(set(index_error_probs + index_outliers))
# Retrieve images and segmentation
for subject_id in index_no_quality:
children.append(dcc.Loading(
html.Div([
html.Div([
html.H6("Subject: {}".format(subject_id),
style=dict(fontSize='2rem')),
dcc.Checklist(
id={'type': 'remove-cbx', 'index': 'cbx-{}-{}'.format(segmentation_method, subject_id)},
options=[{'label': 'Remove', 'value': 'Remove'}],
value=[],
style=dict(fontSize='1.8rem'), inputStyle=dict(marginRight="10px")),
], className='twelve columns', style=dict(width='100%', display='flex', verticalAlign='center', justifyContent='space-between')),
build_quality_badges(subject_id, index_error_probs, index_outliers),
html.Div([
dcc.Graph(figure=build_fissure_image(subject_id, segmentation_method, scalar))
], className='twelve columns'),
], className = 'twelve columns')))
return children
def get_quality_tab(segmentation_method):
tab = dbc.Tab(label=segmentation_method, children=html.Div(get_quality_tab_children(segmentation_method, scalar), style=dict(height='80vh', overflowY="auto", padding='20px 20px 20px 20px')))
return tab
tabs = []
for segmentation_method in dict_segmentation_methods.keys():
tabs.append(get_quality_tab(segmentation_method))
return dbc.Tabs(tabs, style=dict(height='40px', verticalAlign='center', padding='0px 10px 0px 10px'))
def build_fissure_image(subject_id, segmentation_method, scalar = 'FA'):
scalar_maps = dict_scalar_maps[segmentation_method][subject_id]
scalar_maps_list = ['wFA','FA','MD','RD','AD']
scalar_map = scalar_maps[scalar_maps_list.index(scalar)]
fig = px.imshow(scalar_map, color_continuous_scale='gray', aspect='auto')
if segmentation_method != 'None':
segmentation = dict_segmentation_masks[segmentation_method][subject_id]
contours = measure.find_contours(segmentation, 0.1)
contour = sorted(contours, key=lambda x: len(x))[-1]
fig.add_trace(go.Scatter(x=contour[:, 1], y=contour[:, 0]))
fig.update_layout(height=250, width=450, paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h", coloraxis_showscale=True)
fig.update_layout(margin = dict(l=0, r=0,t=0,b=30))
return fig
def build_3d_visualization(subject_id):
folderpath = path_dict[subject_id] + 'inCCsight/'
filepath = folderpath + 'segm_watershed3d.npy'
if os.path.exists(filepath):
_, segmentation3d, wFA_v, _ = np.load(filepath, allow_pickle=True)
verts, faces, normals, values = measure.marching_cubes_lewiner(wFA_v, 0)
tri_FA = ff.create_trisurf(x=verts[:,0], y=verts[:,2], z=verts[:,1]*-1+70,
simplices=faces,
colormap=[(1,0,0), (1,0,0)],
aspectratio=dict(x=1, y=1, z=.66),
plot_edges = False,
show_colorbar = False)
tri_FA['data'][0].update(opacity=0.2)
verts, faces, normals, values = measure.marching_cubes_lewiner(segmentation3d, 0)
tri_CC = ff.create_trisurf(x=verts[:,0], y=verts[:,2], z=verts[:,1]*-1+70,
simplices=faces,
colormap=[(0,0,1), (0,0,1)],
aspectratio=dict(x=1, y=1, z=.66),
plot_edges = False,
show_colorbar = False)
tri_CC['data'][0].update(opacity=0.1)
fig = go.Figure(tri_FA)
fig.add_trace(tri_CC.data[0])
fig.update_layout(title="3D Visualization")
fig.update_layout(plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)')
return fig
else:
return []
def build_subject_collapse(segmentation_method='ROQS', scalar_map='wFA', subject_id = list(path_dict.keys())[0]):
segmentation_methods_available = []
for segm_method in dict_segmentation_methods.keys():
if subject_id in dict_scalar_statistics[segm_method].index:
segmentation_methods_available.append(segm_method)
layout = dbc.Card([
html.Div([
html.Div([
build_graph_title("Subject " + subject_id),
], className='row'),
html.Button('X', style=dict(fontSize='1.5rem', margin='10px', padding='0 13px', fontFamily= 'Open Sans', borderRadius='20px'),
id=dict(type='btn-exit-subject', index=subject_id))
], className='twelve columns', style=dict(display='flex', justifyContent='space-between')),
html.Div([
# Segmentation image
html.Div([
build_graph_title("Scalar maps"),
#dcc.Graph(figure=build_3d_visualization(subject_id))
dcc.Graph(figure=build_fissure_image(subject_id, segmentation_methods_available[0], scalar = scalar_map), id='subject_collapse_fissure_img'),
build_fissure_image_dropdown(subject_id, segmentation_methods_available),
], className = 'four columns', style=dict(display='grid', justifyContent='center')),
html.Div(className='one column'),
html.Div([
build_graph_title("Segmentation data"),
build_individual_subject_segm_table(subject_id, segmentation_methods_available[0]),
build_individual_subject_segm_table_dropdown(segmentation_methods_available),
build_graph_title("Parcellation data"),
build_individual_subject_parcel_table(subject_id, segmentation_methods_available[0], parcellation_method='Witelson', scalar='FA'),
build_individual_subject_parcel_table_dropdown(segmentation_methods_available),
], className='six columns'),
], className='row', style={'justifyContent':'center'}),
], style={'margin':'20px', 'backgroundColor':'#FAFAFA', 'border-radius':'20px', 'padding':'0px 0px 50px 0px'})
return layout
def build_quality_badges(subject_id, index_error_probs, index_outliers):
children = []
if subject_id in index_error_probs:
children.append(dbc.Badge("Abnormal Shape", color="secondary", pill=True, style=dict(marginRight='10px', fontSize='12pt', fontWeight='600')))
if subject_id in index_outliers:
children.append(dbc.Badge("FA Outlier", color="secondary", pill=True, style=dict(marginRight='10px', fontSize='12pt', fontWeight='600')))
return html.Div(children=children, className='twelve columns', style=dict(marginLeft='27px'))
# DataTable functions ---------------------------------------------------------------------
def extend_colorscale(colormap, factor = 5):
from plotly.colors import n_colors
new_colormap = []
for i in range(len(colormap)-1):
new_colormap += n_colors(eval(colormap[i][3:]), eval(colormap[i+1][3:]), factor)
for i, color in enumerate(new_colormap):
new_colormap[i] = 'rgb' + str(color)
return new_colormap
def color_table(df, use_values_limits = True, mean = None, stdev = None):
if 'id' in df:
numeric_columns = df.select_dtypes('number').drop(['index'], axis=1)
else:
numeric_columns = df.select_dtypes('number')
colormap = px.colors.diverging.RdBu[::-1][2:-2]
colormap = extend_colorscale(colormap, 4)
styles = []
for col in numeric_columns:
values = df[col]
if use_values_limits is True:
mean = np.mean(values)
min_value = np.min(values)
max_value = np.max(values)
else:
min_value = mean - stdev
max_value = mean + stdev
limits = np.linspace(min_value, max_value, len(colormap))
for value in values:
idx = (np.abs(limits - value)).argmin()
styles.append({
'if': {
'filter_query': '{{{col}}} = {value}'.format(
col = col, value = value
),
'column_id': col
},
'backgroundColor': colormap[idx]
})
styles.append({
'if': {
'filter_query': '{{{col}}} = {value}'.format(col = col, value = value),
'state': 'selected',
'column_id': col,
},
'backgroundColor': colormap[idx]
})
styles.append({
'if': {
'filter_query': '{{{col}}} = {value}'.format(col = col, value = value),
'state': 'selected',
'column_id': 'index',
},
'backgroundColor': 'rgb(228, 228, 255)'
})
return styles
def stripped_rows():
style_data_conditional = []
style_data_conditional.append({'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'})
style_data_conditional.append({'if': {'row_index': 'odd', 'state': 'selected'},
'backgroundColor': 'rgb(228, 228, 228)'})
style_data_conditional.append({'if': {'row_index': 'even'},
'backgroundColor': 'rgb(255, 255, 255)'})
style_data_conditional.append({'if': {'row_index': 'even', 'state': 'selected'},
'backgroundColor': 'rgb(235, 235, 235)'})
style_data_conditional.append({'if': {'state': 'selected'},
"border": "3px solid blue"})
return style_data_conditional
def build_segm_table(mode = 'Method', segmentation_method = 'ROQS', show_stdev = False, color = False, pvalue=False):
list_scalars = ['FA','RD','AD','MD']
if mode == 'subjects':
df = dict_scalar_statistics[segmentation_method]
df = df.drop(dict_removed_subjects[segmentation_method], errors='ignore')
df = df.reset_index().rename(columns={"index": "Subject"})
names = ['Subject'] + list_scalars
else:
if mode == 'Method':
df = pd.DataFrame()
for segmentation_method in dict_segmentation_methods.keys():
df_aux = dict_scalar_statistics[segmentation_method]
df_aux = df_aux.drop(dict_removed_subjects[segmentation_method], errors='ignore')
df_aux = df_aux.reset_index()
df_aux['Method'] = segmentation_method
df = pd.concat([df, df_aux], axis=0)
category_list = list(dict_segmentation_methods.keys())
else:
df = dict_scalar_statistics[segmentation_method]
df = df.drop(dict_removed_subjects[segmentation_method], errors='ignore')
df = pd.concat([df_categories[mode], df], axis=1)
category_list = list(set(df_categories[mode]))
if pvalue is True:
dict_pvalues = {}
for cat1, cat2 in itertools.combinations(category_list, 2):
df1 = df[df[mode] == cat1]
df2 = df[df[mode] == cat2]
dict_pvalues['{} x {}'.format(cat1, cat2)] = get_column_pvalues(df1, df2)
df = pd.DataFrame().from_dict(dict_pvalues, orient='index', columns=scalar_statistics_names).reset_index()
df = df.rename(columns = {'index':mode})
names = [mode] + list_scalars
else:
df = df.groupby(mode).mean().reset_index()
names = [mode] + list_scalars
df = df.round(6).sort_index()
if show_stdev is False:
columns=[{"name": i, "id": i} for i in names]
data=df[names]
else:
columns=[{"name": i, "id": i} for i in df.columns[:-1]]
data=df
layout = dash_table.DataTable(
id = 'segm_table',
columns = columns,
data = data.to_dict('records'),
page_action = 'none',
fixed_rows = {'headers': True},
style_table={
'maxHeight': '300px',
'overflowY': 'auto'},
style_header = {
'fontWeight': 'bold',
},
style_cell = {
'font_family': 'Open Sans',
'font_size': '18px',
'text_align': 'center'
},
style_as_list_view = True,
export_format='xlsx',
export_headers='display',
style_data_conditional = stripped_rows(),
)
return layout
def build_parcel_table(mode = 'Method', segmentation_method = 'ROQS', parcellation_method = 'Witelson', scalar = 'FA', color = False, pvalue = False):
list_regions = ['P1', 'P2', 'P3', 'P4', 'P5']
if mode == 'subjects':
df = pd.DataFrame()
for region in list_regions:
df = | pd.concat([df, dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar]], axis=1) | pandas.concat |
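# --- Editor's note: illustrative sketch of the per-point t-test pattern used by the
# p-value bubble plots above (group names and sizes are invented for the example).
# With 2-D inputs, scipy.stats.ttest_ind tests each column independently (axis=0 by
# default), giving one p-value per point along the CC midline.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
group_a = rng.normal(0.45, 0.05, size=(30, 200))    # 30 subjects x 200 midline points
group_b = rng.normal(0.47, 0.05, size=(25, 200))    # 25 subjects x 200 midline points
pvalues = stats.ttest_ind(group_a, group_b).pvalue  # shape (200,): one p-value per point
significant = pvalues < 0.05                        # mask used to colour the bubbles
print(pvalues.shape, significant.sum())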
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
from typing import Optional
import pandas
import pandas.api.extensions
import pandas.testing
import pyarrow
import pytest
import db_dtypes
SECOND_NANOS = 1_000_000_000
MINUTE_NANOS = 60 * SECOND_NANOS
HOUR_NANOS = 60 * MINUTE_NANOS
def types_mapper(
pyarrow_type: pyarrow.DataType,
) -> Optional[pandas.api.extensions.ExtensionDtype]:
type_str = str(pyarrow_type)
if type_str.startswith("date32") or type_str.startswith("date64"):
return db_dtypes.DateDtype
elif type_str.startswith("time32") or type_str.startswith("time64"):
return db_dtypes.TimeDtype
else:
# Use default type mapping.
return None
SERIES_ARRAYS_DEFAULT_TYPES = [
( | pandas.Series([], dtype="dbdate") | pandas.Series |
import argparse
import sys
from stock.marketdata.storefactory import get_store
from config import store_type
from stock.utils.symbol_util import load_concept, load_industry, get_realtime_by_date, get_stock_basics
import numpy as np
import pandas as pd
def get_stock_increase(date):
store = get_store(store_type)
exsymbols = store.get_stock_exsymbols()
df = pd.DataFrame(columns=["increase", "turnover"])
df_basics = get_stock_basics()
for exsymbol in exsymbols:
try:
df_stock = store.get(exsymbol)
if len(df_stock) < 10:
continue
if date not in df_stock.index:
continue
idx = df_stock.index.get_loc(date)
min10 = np.min(df_stock.iloc[idx-1:idx].close)
increase = df_stock.iloc[idx].close/min10 - 1
outstanding = df_basics.loc[exsymbol]["outstanding"]
turnover = df_stock.iloc[idx].volume/outstanding/1e6
df.loc[exsymbol] = [increase, turnover]
except Exception as e:
continue
return df
def get_best_stock(group):
idx = group["increase"].idxmax()
if np.isnan(idx):
return [np.nan, np.nan, np.nan, np.nan]
max_increase = group.loc[idx]["increase"]
dragon_increase = group.loc[idx]["exsymbol"]
idx = group["turnover"].idxmax()
max_turnover = group.loc[idx]["turnover"]
dragon_turnover = group.loc[idx]["exsymbol"]
return [dragon_increase, max_increase, dragon_turnover, max_turnover]
def get_concept_dragon_head(df, date):
df_grp = df.groupby("concept")
df_res = pd.DataFrame(columns=["dragon_incr", "increase", "dragon_tnov", "turnover"])
for concept, group in df_grp:
exsymbols = group["exsymbol"]
df_res.loc[concept] = get_best_stock(group)
return df_res
def get_industry_dragon_head(df, date):
df_grp = df.groupby("industry")
df_res = pd.DataFrame(columns=["dragon_incr", "increase", "dragon_tnov", "turnover"])
for industry, group in df_grp:
exsymbols = group["exsymbol"]
df_res.loc[industry] = get_best_stock(group)
return df_res
def get_stock_chg(date):
df = get_realtime_by_date(date)
df.loc[:, "chg"] = df["chgperc"]/100
df.loc[:, "zt_price"] = df.yest_close.apply(lambda x: round(x*1.1+1e-8, 2))
df.loc[:, "is_zhangting"] = np.absolute(df["zt_price"]-df["close"])<1e-8
return df
def get_lianban(date):
df = get_realtime_by_date(date)
df.loc[:, "zt_price"] = df.yest_close.apply(lambda x: round(x*1.1+1e-8, 2))
df.loc[:, "is_zhangting"] = np.absolute(df["zt_price"]-df["close"])<1e-8
df.loc[:, "fengdan_money"] = df["b1_v"]*df["b1_p"]*100/1e8
df_zt = df[df.is_zhangting==True]
store = get_store(store_type)
for exsymbol in df_zt.index.tolist():
try:
df_stock = store.get(exsymbol)
idx = df_stock.index.get_loc(date)
df_stock = df_stock.iloc[:idx+1]
df_stock.loc[:, "zt_price"] = df_stock.close.shift(1).apply(lambda x: round(x*1.1+1e-8, 2))
df_stock.loc[:, "is_zhangting"] = np.absolute(df_stock["zt_price"]-df_stock["close"])<1e-8
df_nozt = df_stock[df_stock.is_zhangting==False]
lianban = 0
if len(df_nozt) == 0:
lianban = len(df_stock)
else:
idx_start = df_stock.index.get_loc(df_nozt.index[-1])
idx_end = df_stock.index.get_loc(df_stock.index[-1])
lianban = idx_end - idx_start
df_zt.loc[exsymbol, "lianban"] = lianban
df_zt.loc[exsymbol, "xingu"] = lianban == len(df_stock)-1
df_zt.loc[exsymbol, "fengdan_money"] = df.loc[exsymbol, "fengdan_money"]
df_zt.loc[exsymbol, "lt_mcap"] = df.loc[exsymbol, "lt_mcap"]
except:
continue
columns = ["lianban", "fengdan_money", "lt_mcap", "xingu"]
return df_zt[df_zt.xingu==False][columns].sort_values("lianban")
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
| pd.set_option('display.max_columns', None) | pandas.set_option |
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 15:12:03 2020
Code for reformatting the raw HTML data from the USGS Water Watch page.
Further, the code reads the NWS flood stages and merges them into the daily records.
@author: balajiramesh
"""
import pandas as pd
import numpy as np
#import jenkspy
import geopandas
#%% read stream flow stations
stations=pd.read_csv(r"Z:\Balaji\stram_flow\imelda\stations",sep="\t").drop(0)
file_in = open(r'Z:\Balaji\stram_flow\imelda\data', 'r')
lines=file_in.readlines()+["#"]
file_in.close()
merged_df= | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
def calculateRSI(prices_data, n=14, today_price=None):
"""Calculate the Relative Strength Index of an asset.
Args:
prices_data (pandas dataframe object): prices data
n (int, optional): number of . Defaults to 14.
today_price(int, optional): today's price to predict future RSI. Defaults to None
Return:
rsi (pandas series object): relative strength index
"""
price = prices_data['prices']
# Append today's date if used for prediction
if today_price is not None:
price = price.append( | pd.Series({price.size: today_price}) | pandas.Series |
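# --- Editor's note: calculateRSI above is truncated after appending today's price. A
# hedged sketch of the classic n-period RSI computation with pandas (simple moving
# average variant; the original implementation may differ, e.g. Wilder's smoothing):
def rsi_sketch(price: pd.Series, n: int = 14) -> pd.Series:
    delta = price.diff()                      # day-over-day change
    gains = delta.clip(lower=0)               # keep positive moves
    losses = -delta.clip(upper=0)             # magnitude of negative moves
    avg_gain = gains.rolling(window=n).mean()
    avg_loss = losses.rolling(window=n).mean()
    rs = avg_gain / avg_loss                  # relative strength
    return 100 - 100 / (1 + rs)               # RSI bounded to [0, 100]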
import pandas as pd
import numpy as np
np.random.seed(123)
variables = ['X', 'Y', 'Z']
labels = ['ID_'+str(i) for i in range(0,5)]
X = np.random.random_sample([len(labels),len(variables)])*10
df = | pd.DataFrame(X, columns=variables, index=labels) | pandas.DataFrame |
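# --- Editor's note: the snippet above builds the small random labelled DataFrame that
# commonly introduces hierarchical clustering examples. A hedged sketch of a usual
# next step (an assumption about where the original continues): compute pairwise
# distances and a linkage matrix for a dendrogram.
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage

dist_condensed = pdist(df, metric='euclidean')              # condensed distance vector
dist_square = pd.DataFrame(squareform(dist_condensed),
                           index=labels, columns=labels)    # readable 5x5 distance matrix
row_clusters = linkage(dist_condensed, method='complete')   # complete-linkage clustering
print(dist_square.round(2))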
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
import seaborn as sn
from app.services.sentimental import Sentimental
def get_report_test(dictionary, is_unigram):
# classification results (predicted labels)
prediction = []
# actual results (ground truth)
actual = []
sent = Sentimental(dictionary=dictionary,
negation='D:\\GitHub\\sentiment-analyzer\\app\\dictionaries\\negations.csv',
modifier='D:\\GitHub\\sentiment-analyzer\\app\\dictionaries\\modifier.csv',
is_unigram=is_unigram)
for i in range(0, len(texts)):
result = sent.analyze(texts[i])
if ratings[i] in range(4, 6):
actual.append('positive')
if result['score'] > 0:
prediction.append('positive')
elif result['score'] <= 0:
prediction.append('negative')
else:
actual.append('negative')
if result['score'] > 0:
prediction.append('positive')
elif result['score'] <= 0:
prediction.append('negative')
print(i)
print(classification_report(actual, prediction))
return prediction, actual
# load the test dataset
df = | pd.read_excel('D:\\GitHub\\sentiment-analyzer\\app\static\\reviews.xlsx', sheet_name="Sheet1") | pandas.read_excel |
import numpy as np
import pandas as pd
import pickle as pkl
import proj_utils as pu
from os.path import isdir, join
from os import mkdir
from copy import deepcopy
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn import ensemble, feature_selection, model_selection, preprocessing, svm, metrics, neighbors
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import shuffle
from sklearn.exceptions import ConvergenceWarning
seed = 13
def calc_scores(y_test, predicted):
balanced = metrics.balanced_accuracy_score(y_test, predicted)
chance = metrics.balanced_accuracy_score(y_test, predicted, adjusted=True)
f1 = metrics.f1_score(y_test, predicted, average=None)
return balanced, chance, f1
def save_scores(f1_scores, balanced_scores, chance_scores, class_labels):
# Calculate average performance and tack it onto the end of the score list, save to nice df
n_folds = len(balanced_scores)
f1_array = np.asarray(f1_scores)
if n_folds != f1_array.shape[0]:
raise ValueError("Number of folds does not match")
rownames = ['Fold %02d' % (n+1) for n in range(n_folds)]
rownames.append('Average')
f1_class_averages = np.mean(f1_array, axis=0)
f1_data = np.vstack((f1_array, f1_class_averages))
f1_df = pd.DataFrame(f1_data, index=rownames, columns=class_labels)
balanced_scores.append(np.mean(balanced_scores))
chance_scores.append(np.mean(chance_scores))
accuracy_data = np.asarray([balanced_scores, chance_scores]).T
score_df = pd.DataFrame(data=accuracy_data, index=rownames, columns=['Balanced accuracy', 'Chance accuracy'])
return f1_df, score_df
def svmc(x_train, y_train, x_test, cleaned_features):
clf = svm.LinearSVC(fit_intercept=False, random_state=seed)
clf.fit(x_train, y_train)
target_classes = clf.classes_
target_classes = [str(c) for c in target_classes]
predicted = clf.predict(x_test)
if len(target_classes) == 2:
idx_label = ['coefficients']
else:
idx_label = target_classes
coef_df = pd.DataFrame(clf.coef_, index=idx_label, columns=cleaned_features)
return predicted, coef_df, clf
def extra_trees(x_train, y_train, x_test, cleaned_features):
clf = ensemble.ExtraTreesClassifier(random_state=seed)
clf.fit(x_train, y_train)
predicted = clf.predict(x_test)
feature_df = pd.DataFrame(columns=cleaned_features)
feature_df.loc['feature_importances'] = clf.feature_importances_
return predicted, feature_df, clf
def knn(x_train, y_train, x_test):
clf = neighbors.KNeighborsClassifier()
clf.fit(x_train, y_train)
predicted = clf.predict(x_test)
return predicted, clf
def convert_hads_to_single_label(hads_array):
hads_array = hads_array.astype(int)
vartypes = ['anxiety', 'depression']
hads_single_label = []
for row in range(hads_array.shape[0]):
str_combos = []
for col in range(hads_array.shape[1]):
val = hads_array[row, col]
if val == 0:
str_convert = '%s_normal' % vartypes[col]
elif val == 1:
str_convert = '%s_borderline' % vartypes[col]
elif val == 2:
str_convert = '%s_abnormal' % vartypes[col]
str_combos.append(str_convert)
hads_combined = '%s-%s' % (str_combos[0], str_combos[1])
hads_single_label.append(hads_combined)
return hads_single_label
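# Illustrative check of the mapping above (input values are an example only):
# with the 0/1/2 = normal/borderline/abnormal coding, a row of [0, 2] combines
# into a single 'anxiety_normal-depression_abnormal' label.
_example_labels = convert_hads_to_single_label(np.array([[0, 2], [1, 1]]))
# -> ['anxiety_normal-depression_abnormal', 'anxiety_borderline-depression_borderline']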
def feature_selection_with_covariates(x_train, x_test, y_train, continuous_indices, categorical_indices, feature_names):
# Split data for continuous, categorical preprocessing
x_train_cont, x_test_cont = x_train[:, continuous_indices], x_test[:, continuous_indices]
x_train_cat, x_test_cat = x_train[:, categorical_indices], x_test[:, categorical_indices]
# Standardization for continuous data
preproc = preprocessing.StandardScaler().fit(x_train_cont)
x_train_z = preproc.transform(x_train_cont)
x_test_z = preproc.transform(x_test_cont)
# Variance threshold for categorical data
varthresh = feature_selection.VarianceThreshold(threshold=0).fit(x_train_cat)
x_train_v = varthresh.transform(x_train_cat)
x_test_v = varthresh.transform(x_test_cat)
x_train_data = np.hstack((x_train_z, x_train_v))
x_test_data = np.hstack((x_test_z, x_test_v))
# Feature selection with extra trees
extra_tree_fs = ensemble.ExtraTreesClassifier(random_state=seed)
feature_model = feature_selection.SelectFromModel(extra_tree_fs, threshold="2*mean")
# Transform train and test data with feature selection model
x_train_feature_selected = feature_model.fit_transform(x_train_data, y_train)
x_test_feature_selected = feature_model.transform(x_test_data)
feature_indices = feature_model.get_support(indices=True)
cleaned_features = [feature_names[i] for i in feature_indices]
return x_train_feature_selected, x_test_feature_selected, cleaned_features
def feature_selection_without_covariates(x_train, x_test, y_train, feature_names):
# Standardization for continuous data
preproc = preprocessing.StandardScaler().fit(x_train)
x_train_z = preproc.transform(x_train)
x_test_z = preproc.transform(x_test)
# Feature selection with extra trees
extra_tree_fs = ensemble.ExtraTreesClassifier(random_state=seed)
feature_model = feature_selection.SelectFromModel(extra_tree_fs, threshold="2*mean")
# Transform train and test data with feature selection model
x_train_feature_selected = feature_model.fit_transform(x_train_z, y_train)
x_test_feature_selected = feature_model.transform(x_test_z)
feature_indices = feature_model.get_support(indices=True)
cleaned_features = [feature_names[i] for i in feature_indices]
return x_train_feature_selected, x_test_feature_selected, cleaned_features
@ignore_warnings(category=ConvergenceWarning)
def eeg_classify(eeg_data, target_data, target_type, model, outdir=None, resample='SMOTE'):
feature_names = list(eeg_data)
if "categorical_sex_male" in feature_names:
cv_check = 'with_covariates'
else:
cv_check = 'without_covariates'
    if resample == 'no_resample':
class NoResample: # for convenience
@staticmethod
def fit_resample(a, b):
return a.values, np.asarray(b)
resampler = NoResample()
    elif resample == 'ROS':
resampler = RandomOverSampler(sampling_strategy='not majority', random_state=seed)
    elif resample == 'SMOTE':
resampler = SMOTE(sampling_strategy='not majority', random_state=seed)
    elif resample == 'RUS':
resampler = RandomUnderSampler(sampling_strategy='not minority', random_state=seed)
x_res, y_res = resampler.fit_resample(eeg_data, target_data)
if outdir is not None:
model_outdir = join(outdir, '%s %s %s %s' % (target_type, model, cv_check, resample))
if not isdir(model_outdir):
mkdir(model_outdir)
print('%s: Running classification - %s %s %s %s' % (pu.ctime(), target_type, model, cv_check, resample))
# Apply k-fold splitter
n_splits = 50
skf = model_selection.StratifiedKFold(n_splits=n_splits, random_state=seed)
skf.get_n_splits(x_res, y_res)
fold_count = 0
classifier_objects, classifier_coefficients, cm_dict, norm_cm_dict = {}, {}, {}, {}
balanced_acc, chance_acc, f1_scores = [], [], []
for train_idx, test_idx in skf.split(x_res, y_res):
fold_count += 1
print('%s: Running FOLD %d for %s' % (pu.ctime(), fold_count, target_type))
foldname = 'Fold %02d' % fold_count
# Stratified k-fold splitting
x_train, x_test = x_res[train_idx], x_res[test_idx]
y_train, y_test = y_res[train_idx], y_res[test_idx]
if "categorical_sex_male" in feature_names:
continuous_features = [f for f in feature_names if 'categorical' not in f]
continuous_indices = [eeg_data.columns.get_loc(cont) for cont in continuous_features]
categorical_features = [f for f in feature_names if 'categorical' in f]
categorical_indices = [eeg_data.columns.get_loc(cat) for cat in categorical_features]
x_train_fs, x_test_fs, cleaned_features = feature_selection_with_covariates(
x_train, x_test, y_train, continuous_indices, categorical_indices, feature_names)
else:
x_train_fs, x_test_fs, cleaned_features = feature_selection_without_covariates(
x_train, x_test, y_train, feature_names)
        if model == 'svm':
predicted, coef_df, clf = svmc(x_train_fs, y_train, x_test_fs, cleaned_features)
classifier_coefficients[foldname] = coef_df
        elif model == 'extra_trees':
predicted, feature_importances, clf = extra_trees(x_train_fs, y_train, x_test_fs, cleaned_features)
classifier_coefficients[foldname] = feature_importances
        elif model == 'knn':
predicted, clf = knn(x_train_fs, y_train, x_test_fs)
classifier_objects[foldname] = clf
# Calculating fold performance scores
balanced, chance, f1 = calc_scores(y_test, predicted)
balanced_acc.append(balanced)
chance_acc.append(chance)
f1_scores.append(f1)
# Calculating fold confusion matrix
cm = metrics.confusion_matrix(y_test, predicted)
normalized_cm = cm.astype('float')/cm.sum(axis=1)[:, np.newaxis]
cm_dict[foldname] = pd.DataFrame(cm, index=clf.classes_, columns=clf.classes_)
norm_cm_dict[foldname] = pd.DataFrame(normalized_cm, index=clf.classes_, columns=clf.classes_)
# Saving performance scores
f1_df, score_df = save_scores(f1_scores, balanced_acc, chance_acc, class_labels=clf.classes_)
scores_dict = {'accuracy scores': score_df,
'f1 scores': f1_df}
try:
pu.save_xls(scores_dict, join(model_outdir, 'performance.xlsx'))
# Saving coefficients
if bool(classifier_coefficients):
pu.save_xls(classifier_coefficients, join(model_outdir, 'coefficients.xlsx'))
pu.save_xls(cm_dict, join(model_outdir, 'confusion_matrices.xlsx'))
pu.save_xls(norm_cm_dict, join(model_outdir, 'confusion_matrices_normalized.xlsx'))
# Saving classifier object
with open(join(model_outdir, 'classifier_object.pkl'), 'wb') as file:
pkl.dump(classifier_objects, file)
except Exception:
pass
return scores_dict
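# Usage sketch (hypothetical inputs; not part of the original module): eeg_data
# is a pandas DataFrame of features, target_data a 1-D array of class labels,
# and output_dir an existing directory for the Excel/PKL outputs.
def _example_eeg_classification(eeg_data, target_data, output_dir):
    return eeg_classify(eeg_data, target_data, target_type='tinnitus_side',
                        model='extra_trees', outdir=output_dir, resample='SMOTE')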
def side_classification_drop_asym(ml_data, behavior_data, output_dir, models=None):
print('%s: Running classification on tinnitus side, dropping asymmetrical subjects' % pu.ctime())
ml_copy = deepcopy(ml_data)
if models is None:
models = ['extra_trees']
resample_methods = [None, 'over', 'under']
t = pu.convert_tin_to_str(behavior_data['tinnitus_side'].values.astype(float), 'tinnitus_side')
t_df = pd.DataFrame(t, index=ml_copy.index)
asym_indices = []
for asym in ['Right>Left', 'Left>Right']:
asym_indices.extend([i for i, s in enumerate(t) if asym == s])
asym_data = ml_copy.iloc[asym_indices]
ml_copy.drop(index=asym_data.index, inplace=True)
t_df.drop(index=asym_data.index, inplace=True)
target_cleaned = np.ravel(t_df.values)
for model in models:
for res in resample_methods:
eeg_classify(ml_copy, target_cleaned, 'tinnitus_side_no_asym', model, output_dir, resample=res)
# side_classification_drop_asym(ml_data, behavior_data, output_dir, models=models)
def type_classification_drop_mixed(ml_data, behavior_data, output_dir, models=None):
print('%s: Running classification on tinnitus type, dropping mixed type subjects' % pu.ctime())
ml_copy = deepcopy(ml_data)
if models is None:
models = ['extra_trees']
resample_methods = [None, 'over', 'under']
t = pu.convert_tin_to_str(behavior_data['tinnitus_type'].values.astype(float), 'tinnitus_type')
t_df = | pd.DataFrame(t, index=ml_copy.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Part of slugdetection package
@author: <NAME>
github: dapolak
"""
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from slugdetection.Slug_Detection import Slug_Detection
import unittest
class Test_Slug_Detection(unittest.TestCase):
"""
Unitest class for the Slug Detection class
"""
def test_create_class(self, spark_data):
"""
Unit test for class creation
Parameters
----------
spark_data : Spark data frame
well data frame
"""
test_class = Slug_Detection(spark_data)
assert hasattr(test_class, "well_df"), "Assert well_df attribute is created"
assert len(test_class.well_df.head(1)) != 0, \
"well_df attribute not empty" # Pyspark has no clear empty attribute
def test_jump(self, spark_data):
"""
Unit test for jump method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.jump()
assert 'count_id' in test_class.pd_df.columns, "Assert new count_id column was created"
assert test_class.pd_df['count_id'].nunique() >= 3, \
"For this example, assert that there are three continuous sets of data"
def test_clean_short_sub(self, spark_data):
"""
Unit test for clean_short_sub method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.jump()
a = len(test_class.pd_df) # Store length of pd_df data frame
test_class.clean_short_sub(min_df_size=200) # Apply clean_short_sub method
b = len(test_class.pd_df) # Store length of pd_df data frame
assert a > b, "For this example, the post clean_short_sub pd_df attribute should be shorter"
def test_sub_data(self, spark_data):
"""
Unit test for clean_short_sub method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.sub_data(min_df_size=200)
assert hasattr(test_class, "sub_df_dict"), "New attribute must have been created"
a = test_class.pd_df["count_id"].nunique()
assert a == len(test_class.sub_df_dict), "Number of unique count ids must be the same as number of data " \
"frames in sub_df_dict dictionary"
a = test_class.sub_df_dict[0] # Get first element of the dictionary
assert isinstance(a, pd.DataFrame), "sub_df_dict elements are pandas data frames"
for f in test_class.features:
assert f in a.columns, "data frame must contain all features"
def test_slug_check(self, spark_data):
"""
Unit test for slug_check method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="18-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.sub_data()
## Test 1 : Test that slug_check returns right value
##
# Create fake dataframe
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(1000)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = (np.sin(x) * 3) + 10 # Create sin wave array (slug-like)
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str) # Create data frame with timestamp
fake_df["ts"] = pd.to_datetime(fake_df["ts"]) # Ensure timestamp are datetime type
fake_df["WH_P"] = whp_list # Add sine wave as WHP data
test_class.sub_df_dict = {
1: fake_df
} # Override sub_df_dict attribute with fake data frame
slug_idx = pd.Series(whp_list)[whp_list > 12.90].index.tolist() # Create list of slug peaks for fake slugs
first = test_class.slug_check(slug_idx, 1) # Get results from slug_check method
assert len(first) == 1, "First slug index list should only contain one value in this example"
## Test 2 : Test that slug_check returns right value
##
# Create fake data frame
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(2300)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = (np.sin(x) * 3) + 10 # Create sin wave array (slug-like)
whp_list = np.append(whp_list, [10 for i in range(300)]) # Add flat flow to simulate normal flow
whp_list = np.append(whp_list, (np.sin(x) * 3) + 10) # Add more slugs
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str) # Create data frame with timestamp
fake_df["ts"] = pd.to_datetime(fake_df["ts"]) # Ensure timestamp are datetime type
fake_df["WH_P"] = whp_list # Add fake whp data
slug_idx = pd.Series(whp_list)[whp_list > 12.90].index.tolist() # Create list of slug peaks
test_class.sub_df_dict = {
1: fake_df
} # Override sub_df_dict attribute with fake data frame
first = test_class.slug_check(slug_idx, 1) # Get results from slug_check method
assert first, "First slug index list should not be empty"
assert len(first) == 2, "First slug index list should only contain two value in this example"
assert first[1] == 1305, "In this example, the second first slug of the data set occurs at minutes = 1305"
def test_label_slugs(self, spark_data):
"""
Unit test for label_slugs method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="30-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
try:
f, s = test_class.label_slugs()
print("Sub df dict attribute has not been created")
raise ValueError
except AssertionError:
pass
test_class.sub_data() # Create sub df dict
# create fake data set
datetime_format = '%d-%b-%y %H:%M'
base = datetime.strptime("01-JAN-16 09:09", datetime_format)
        date_list = [[base + timedelta(minutes=x)] for x in range(1000)]  # Create timestamps one minute apart
x = np.linspace(0, 100 * np.pi, 1000)
whp_list = (np.sin(x) * 3) + 10 # create sin wave
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str)
fake_df["ts"] = | pd.to_datetime(fake_df["ts"]) | pandas.to_datetime |
import numpy as np
import scipy as sp
import scipy.special
import scipy.stats
import gegenbauer
import matplotlib.pyplot as plt
import approx_learning_curves
import compute_NTK_spectrum
import pandas as pd
import argparse
def sample_random_points(num_pts, d):
R = np.random.multivariate_normal(np.zeros(d), np.eye(d), num_pts)
for i in range(num_pts):
R[i,:] = R[i,:] / np.linalg.norm(R[i,:])
return R
def compute_kernel(X, Xp, spectrum, d, kmax):
P = X.shape[0]
Pp = Xp.shape[0]
gram = X @ Xp.T
gram = np.reshape(gram, P*Pp)
Q = gegenbauer.get_gegenbauer_fast2(gram, kmax, d)
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
K = Q.T @ (spectrum * degens)
K = np.reshape(K, (P,Pp))
return K
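# Small usage sketch (illustrative only): evaluate the kernel between two sets
# of random points on the sphere; `spectrum`, `d` and `kmax` are assumed to be
# the values computed further below in this script.
def example_gram(spectrum, d, kmax):
    X = sample_random_points(20, d)
    Xp = sample_random_points(10, d)
    return compute_kernel(X, Xp, spectrum, d, kmax)  # (20, 10) Gram matrix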
def generalization(P, X_teach, spectrum, kmax, d, num_repeats, lamb = 1e-6):
errors_avg = np.zeros(kmax)
errors_tot_MC = 0
all_errs = np.zeros((kmax, num_repeats))
all_MC = np.zeros(num_repeats)
X_teach = sample_random_points(P_teach,d)
alpha_teach = np.sign( np.random.random_sample(P_teach) - 0.5* np.ones(P_teach) )
for i in range(num_repeats):
X_teach = sample_random_points(P_teach,d)
alpha_teach = np.sign( np.random.random_sample(P_teach) - 0.5* np.ones(P_teach) )
X = sample_random_points(P, d)
K_student = compute_kernel(X,X, spectrum, d, kmax)
K_stu_te = compute_kernel(X,X_teach, spectrum, d, kmax)
y = K_stu_te @ alpha_teach
K_inv = np.linalg.inv(K_student + lamb * np.eye(P))
alpha = K_inv @ y
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
gram_ss = X @ X.T
gram_st = X @ X_teach.T
gram_tt = X_teach @ X_teach.T
Q_ss = gegenbauer.get_gegenbauer_fast2(np.reshape(gram_ss, P**2), kmax, d)
Q_st = gegenbauer.get_gegenbauer_fast2(np.reshape(gram_st, P*P_teach), kmax, d)
Q_tt = gegenbauer.get_gegenbauer_fast2(np.reshape(gram_tt, P_teach**2), kmax, d)
errors = np.zeros(kmax)
for k in range(kmax):
Q_ssk = np.reshape(Q_ss[k,:], (P,P))
Q_stk = np.reshape(Q_st[k,:], (P,P_teach))
Q_ttk = np.reshape(Q_tt[k,:], (P_teach,P_teach))
errors[k] = spectrum[k]**2 * degens[k] * ( alpha.T @ Q_ssk @ alpha - 2*alpha.T @ Q_stk @ alpha_teach + alpha_teach.T @ Q_ttk @ alpha_teach )
errors_avg += 1/num_repeats * errors
all_errs[:,i] = errors
num_test = 2500
X_test = sample_random_points(num_test, d)
K_s = compute_kernel(X,X_test, spectrum, d, kmax)
K_t = compute_kernel(X_teach,X_test, spectrum, d, kmax)
y_s = K_s.T @ alpha
y_t = K_t.T @ alpha_teach
tot_error = 1/num_test * np.linalg.norm(y_s - y_t)**2
print("errors")
print("expt: %e" % tot_error)
print("theory: %e" % np.sum(errors))
errors_tot_MC += 1/num_repeats * tot_error
all_MC[i] = tot_error
std_errs = sp.stats.sem(all_errs, axis=1)
std_MC = sp.stats.sem(all_MC)
return errors_avg, errors_tot_MC, std_errs, std_MC
parser = argparse.ArgumentParser()
parser.add_argument('--input_dim', type=int, default= 10,
help='data input dimension')
parser.add_argument('--lamb', type=float,
help='explicit regularization penalty', default = 0)
parser.add_argument('--NTK_depth', type=int, default= 3,
help='depth of Fully Connected ReLU NTK')
args = parser.parse_args()
d = args.input_dim
lamb = args.lamb
depth = args.NTK_depth
kmax = 30
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
spectrum = compute_NTK_spectrum.get_effective_spectrum([depth], kmax, d, ker = 'NTK')[0,:]
s = [i for i in spectrum if i > 0]
P = 50
P_teach = 300
P_vals = np.logspace(0.25, 3, num = 15).astype('int')
num_repeats = 50
all_errs = np.zeros((len(P_vals), kmax))
all_mc = np.zeros(len(P_vals))
std_errs = np.zeros( (len(P_vals), kmax) )
std_MC = np.zeros(len(P_vals))
for i in range(len(P_vals)):
P = P_vals[i]
all_errs[i,:], all_mc[i], std_errs[i,:], std_MC[i] = generalization(P, P, spectrum, kmax, d, num_repeats, lamb=lamb)
sol, p = approx_learning_curves.simulate_uc(spectrum, degens, lamb = lamb)
plt.rcParams.update({'font.size': 12})
kplot = [0,1,2,4,6]
colors = ['b','r','g', 'm', 'c']
all_errsdf = pd.DataFrame(all_errs)
std_errsdf = pd.DataFrame(std_errs)
mc_df = pd.DataFrame(all_mc)
std_mc_df = | pd.DataFrame(std_MC) | pandas.DataFrame |
import json
import logging
import os
import subprocess
import pandas as pd
from csgo.utils import check_go_version
class DemoParser:
"""DemoParser can parse, load and clean data from a CSGO demofile. Can be instantiated without a specified demofile.
Attributes:
demofile (string): A string denoting the path to the demo file, which ends in .dem
log (boolean): A boolean denoting if a log will be written. If true, log is written to "csgo_parser.log"
demo_id (string): A unique demo name/game id. Default is inferred from demofile name
parse_rate (int): One of 128, 64, 32, 16, 8, 4, 2, or 1. The lower the value, the more frames are collected. Indicates spacing between parsed demo frames in ticks. Default is 128.
parse_frames (bool): Flag if you want to parse frames (trajectory data) or not
trade_time (int): Length of the window for a trade (in seconds). Default is 5.
dmg_rolled (bool): Boolean if you want damages rolled up (since multiple damages for a player can happen in 1 tick from the same weapon.)
buy_style (string): Buy style string, one of "hltv" or "csgo"
use_exe_parser (bool): Flag if you want to parse demo on Windows without installing Go lang
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.14
"""
def __init__(
self,
demofile="",
outpath=None,
log=False,
demo_id=None,
parse_rate=128,
parse_frames=True,
trade_time=5,
dmg_rolled=False,
buy_style="hltv",
use_exe_parser=None
):
# Set up logger
if log:
logging.basicConfig(
filename="csgo_demoparser.log",
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
)
self.logger = logging.getLogger("CSGODemoParser")
self.logger.handlers = []
fh = logging.FileHandler("csgo_demoparser.log")
fh.setLevel(logging.INFO)
self.logger.addHandler(fh)
else:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
)
self.logger = logging.getLogger("CSGODemoParser")
# Handle demofile and demo_id name. Finds right most '/' in case demofile is a specified path.
self.demofile = os.path.abspath(demofile)
self.logger.info("Initialized CSGODemoParser with demofile " + self.demofile)
if (demo_id is None) | (demo_id == ""):
self.demo_id = demofile[demofile.rfind("/") + 1 : -4]
else:
self.demo_id = demo_id
if outpath is None:
self.outpath = os.path.abspath(os.getcwd())
else:
self.outpath = os.path.abspath(outpath)
self.logger.info("Setting demo id to " + self.demo_id)
# Handle parse rate. If the parse rate is less than 64, likely to be slow
if parse_rate < 1 or type(parse_rate) is not int:
self.logger.warning(
"Parse rate of "
+ str(parse_rate)
+ " not acceptable! Parse rate must be an integer greater than 0."
)
parse_rate = 128
self.parse_rate = parse_rate
if parse_rate < 64 and parse_rate > 1:
self.logger.warning(
"A parse rate lower than 64 may be slow depending on the tickrate of the demo, which is usually 64 for MM and 128 for pro demos."
)
self.parse_rate = parse_rate
elif parse_rate >= 256:
self.logger.warning(
"A high parse rate means very few frames. Only use for testing purposes."
)
self.parse_rate = parse_rate
else:
self.parse_rate = parse_rate
self.logger.info("Setting parse rate to " + str(self.parse_rate))
# Handle trade time
if trade_time <= 0:
self.logger.warning(
"Trade time can't be negative, setting to default value of 5 seconds."
)
self.trade_time = 5
elif trade_time > 7:
self.logger.warning(
"Trade time of "
+ str(trade_time)
+ " is rather long. Consider a value between 4-7."
)
else:
self.trade_time = trade_time
self.logger.info("Setting trade time to " + str(self.trade_time))
# Handle buy style
if buy_style not in ["hltv", "csgo"]:
self.logger.warning(
"Buy style specified is not one of hltv, csgo, will be set to hltv by default"
)
self.buy_style = "hltv"
else:
self.buy_style = buy_style
self.logger.info("Setting buy style to " + str(self.buy_style))
self.dmg_rolled = dmg_rolled
self.parse_frames = parse_frames
self.logger.info("Rollup damages set to " + str(self.dmg_rolled))
self.logger.info("Parse frames set to " + str(self.parse_frames))
self.logger.info("Setting demo id to " + self.demo_id)
if (use_exe_parser is None) | (not use_exe_parser):
self.use_exe_parser = False
else:
self.use_exe_parser = True
# Set parse error to False
self.parse_error = False
def parse_demo(self):
"""Parse a demofile using the Go script parse_demo.go -- this function needs the .demofile to be set in the class, and the file needs to exist.
Returns:
Outputs a JSON file to current working directory.
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.14
FileNotFoundError: Raises a FileNotFoundError if the demofile path does not exist.
"""
# Check if Golang version is compatible
if self.use_exe_parser:
self.logger.info("Use exe parser")
else:
acceptable_go = check_go_version()
if not acceptable_go:
self.logger.error(
"Error calling Go. Check if Go is installed using 'go version'. Need at least v1.14.0."
)
raise ValueError(
"Error calling Go. Check if Go is installed using 'go version'. Need at least v1.14.0."
)
else:
self.logger.info("Go version>=1.14.0")
# Check if demofile exists
if not os.path.exists(os.path.abspath(self.demofile)):
self.logger.error("Demofile path does not exist!")
raise FileNotFoundError("Demofile path does not exist!")
path = os.path.join(os.path.dirname(__file__), "")
self.logger.info("Running parser from " + path)
self.logger.info("Looking for file at " + self.demofile)
self.parser_cmd = [os.path.join(os.path.dirname(os.path.abspath(__file__)), 'parse_demo.exe')] if self.use_exe_parser else ["go", "run", "parse_demo.go"]
self.parser_cmd += [
"-demo",
self.demofile,
"-parserate",
str(self.parse_rate),
"-tradetime",
str(self.trade_time),
"-buystyle",
str(self.buy_style),
"-demoid",
str(self.demo_id),
"-out",
self.outpath,
]
if self.dmg_rolled:
self.parser_cmd.append("--dmgrolled")
if self.parse_frames:
self.parser_cmd.append("--parseframes")
proc = subprocess.Popen(
self.parser_cmd,
stdout=subprocess.PIPE,
cwd=path,
)
stdout = proc.stdout.read().splitlines()
self.output_file = self.demo_id + ".json"
if os.path.isfile(self.output_file):
self.logger.info("Wrote demo parse output to " + self.output_file)
self.parse_error = False
else:
self.parse_error = True
self.logger.error("No file produced, error in calling Golang")
self.logger.error(stdout)
def read_json(self, json_path):
"""Reads the JSON file given a JSON path. Can be used to read in already processed demofiles.
Args:
json_path (string): Path to JSON file
Returns:
JSON in Python dictionary form
Raises:
FileNotFoundError: Raises a FileNotFoundError if the JSON path doesn't exist
"""
# Check if JSON exists
if not os.path.exists(os.path.abspath(json_path)):
self.logger.error("JSON path does not exist!")
raise FileNotFoundError("JSON path does not exist!")
# Read in json to .json attribute
with open(json_path, encoding="utf8") as f:
demo_data = json.load(f)
self.json = demo_data
self.logger.info(
"JSON data loaded, available in the `json` attribute to parser"
)
return demo_data
def parse(self, return_type="json"):
"""Wrapper for parse_demo() and read_json(). Use to parse a demo.
Args:
return_type (string): Either "json" or "df"
Returns:
A dictionary of output (which is parsed to a JSON file in the working directory)
Raises:
ValueError: Raises a ValueError if the return_type is not "json" or "df"
AttributeError: Raises an AttributeError if the .json attribute is None
"""
self.parse_demo()
self.read_json(json_path=self.outpath + "/" + self.output_file)
if self.json:
self.logger.info("JSON output found")
if return_type == "json":
return self.json
elif return_type == "df":
demo_data = self.parse_json_to_df()
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error("Parse return_type must be either 'json' or 'df'")
raise ValueError("return_type must be either 'json' or 'df'")
else:
self.logger.error("JSON couldn't be returned")
raise AttributeError("No JSON parsed! Error in producing JSON.")
def parse_json_to_df(self):
"""Returns JSON into dictionary where keys correspond to data frames
Returns:
A dictionary of output
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
demo_data = {}
demo_data["matchID"] = self.json["matchID"]
demo_data["clientName"] = self.json["clientName"]
demo_data["mapName"] = self.json["mapName"]
demo_data["tickRate"] = self.json["tickRate"]
demo_data["playbackTicks"] = self.json["playbackTicks"]
demo_data["rounds"] = self._parse_rounds()
demo_data["kills"] = self._parse_kills()
demo_data["damages"] = self._parse_damages()
demo_data["grenades"] = self._parse_grenades()
demo_data["flashes"] = self._parse_flashes()
demo_data["weaponFires"] = self._parse_weapon_fires()
demo_data["bombEvents"] = self._parse_bomb_events()
demo_data["frames"] = self._parse_frames()
demo_data["playerFrames"] = self._parse_player_frames()
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_frames(self):
"""Returns frames as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a frame (game state) in the demo, which is a discrete point of time.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
frames_dataframes = []
keys = ["tick", "seconds"]
for r in self.json["gameRounds"]:
if r["frames"]:
for frame in r["frames"]:
frame_item = {}
frame_item["roundNum"] = r["roundNum"]
for k in keys:
frame_item[k] = frame[k]
for side in ["ct", "t"]:
if side == "ct":
frame_item["ctTeamName"] = frame["ct"]["teamName"]
frame_item["ctEqVal"] = frame["ct"]["teamEqVal"]
frame_item["ctAlivePlayers"] = frame["ct"][
"alivePlayers"
]
frame_item["ctUtility"] = frame["ct"]["totalUtility"]
else:
frame_item["tTeamName"] = frame["t"]["teamName"]
frame_item["tEqVal"] = frame["t"]["teamEqVal"]
frame_item["tAlivePlayers"] = frame["t"]["alivePlayers"]
frame_item["tUtility"] = frame["t"]["totalUtility"]
frames_dataframes.append(frame_item)
frames_df = pd.DataFrame(frames_dataframes)
frames_df["matchID"] = self.json["matchID"]
frames_df["mapName"] = self.json["mapName"]
return | pd.DataFrame(frames_dataframes) | pandas.DataFrame |
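# Usage sketch for the parser defined above (the demo path and settings are
# illustrative assumptions, not values from the original file):
def _example_demo_parse(demo_path="match.dem"):
    parser = DemoParser(demofile=demo_path, parse_rate=128, parse_frames=True)
    data = parser.parse(return_type="df")
    return data["rounds"], data["kills"]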
from preprocessor import FeaturePreprocessor
from regression_model import RegressionModel
from hyperparameter_optimizer import HyperParameterOptimizer
from diagnostics import HPODiagnoser, ModelDiagnoser
from interpret import ModelInterpreter
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split, KFold
def preprocessor_test():
pass
def regression_model_test():
X, y = datasets.load_boston(return_X_y = True)
X = pd.DataFrame(X)
model_name = "random_forest"
random_seed = 1001
obj_func_name = "mse"
eval_func_names = ["r_squared", "rmse"]
n_estimators = 400
model = RegressionModel(X_train = X, y_train = y, model_type = model_name,
obj_func_name = obj_func_name, random_seed = random_seed)
if model_name == "random_forest":
model_params = {"n_estimators":n_estimators}
else:
model_params = {}
model.fit(model_params = model_params)
training_set_preds = model.predict(X)
print(training_set_preds)
cv_metrics = model.cross_validate(train_valid_folds = 10,
eval_func_names = eval_func_names,
model_params = model_params)
print(cv_metrics)
def hyperparameter_optimizer_test():
X, y = datasets.load_boston(return_X_y = True)
X = pd.DataFrame(X)
model_name = "random_forest"
random_seed = 1001
obj_func_name = "mse"
n_estimators = 400
total_n_iterations = 50
base_model = RegressionModel(X_train = X, y_train = y, model_type = model_name,
obj_func_name = obj_func_name, random_seed = random_seed)
hpo = HyperParameterOptimizer(verbosity = 1)
if model_name == "random_forest":
override_params = {"n_estimators":n_estimators}
else:
override_params = {}
hpo.tune_and_fit(model = base_model,
total_n_iterations = total_n_iterations,
train_valid_folds = 10,
override_params = override_params,
use_model_copy = True)
tuned_model = hpo.model
return tuned_model
def model_diagnoser_test():
X, y = datasets.load_boston(return_X_y = True)
X = | pd.DataFrame(X) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import string
from collections import OrderedDict
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.common_metadata import make_meta, store_schema_metadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.naming import DEFAULT_METADATA_VERSION
from kartothek.io_components.metapartition import (
MetaPartition,
_unique_label,
parse_input_to_metapartition,
partition_labels_from_mps,
)
from kartothek.serialization import DataFrameSerializer, ParquetSerializer
def test_store_single_dataframe_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
assert len(meta_partition.data) == 0
expected_key = "dataset_uuid/core/test_label.parquet"
assert meta_partition.files == {"core": expected_key}
assert meta_partition.label == "test_label"
files_in_store = list(store.keys())
expected_num_files = 1
assert len(files_in_store) == expected_num_files
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_key)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_key)
assert len(files_in_store) == expected_num_files - 1
def test_store_single_dataframe_as_partition_no_metadata(store, metadata_version):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=False,
)
assert len(partition.data) == 0
expected_file = "dataset_uuid/core/test_label.parquet"
assert partition.files == {"core": expected_file}
assert partition.label == "test_label"
# One meta one actual file
files_in_store = list(store.keys())
assert len(files_in_store) == 1
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
def test_load_dataframe_logical_conjunction(
store, meta_partitions_files_only, metadata_version, metadata_storage_format
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="cluster_1",
data={"core": df},
metadata_version=metadata_version,
logical_conjunction=[("P", ">", 4)],
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
predicates = None
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 7, 8, 9], "L": [5, 6, 7, 8, 9], "TARGET": [15, 16, 17, 18, 19]}
).set_index(np.arange(5, 10))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 6), ("TARGET", "<", 18)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame({"P": [7], "L": [7], "TARGET": [17]}).set_index(
np.array([7])
)
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 2), ("TARGET", "<", 17)], [("TARGET", "==", 19)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 9], "L": [5, 6, 9], "TARGET": [15, 16, 19]}
).set_index(np.array([5, 6, 9]))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
def test_store_multiple_dataframes_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_2 = pd.DataFrame({"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]})
mp = MetaPartition(
label="cluster_1",
data={"core": df, "helper": df_2},
metadata_version=metadata_version,
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
expected_file = "dataset_uuid/core/cluster_1.parquet"
expected_file_helper = "dataset_uuid/helper/cluster_1.parquet"
assert meta_partition.files == {
"core": expected_file,
"helper": expected_file_helper,
}
assert meta_partition.label == "cluster_1"
files_in_store = list(store.keys())
assert len(files_in_store) == 2
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_file)
stored_df = DataFrameSerializer.restore_dataframe(
store=store, key=expected_file_helper
)
pdt.assert_frame_equal(df_2, stored_df)
files_in_store.remove(expected_file_helper)
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_load_dataframes(
meta_partitions_files_only, store_session, predicate_pushdown_to_io
):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
expected_df_2 = pd.DataFrame(OrderedDict([("P", [1]), ("info", ["a"])]))
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert len(mp.data) == 2
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
pdt.assert_frame_equal(data["helper"], expected_df_2, check_dtype=False)
empty_mp = MetaPartition("empty_mp", metadata_version=mp.metadata_version)
empty_mp.load_dataframes(
store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert empty_mp.data == {}
def test_remove_dataframes(meta_partitions_files_only, store_session):
mp = meta_partitions_files_only[0].load_dataframes(store=store_session)
assert len(mp.data) == 2
mp = mp.remove_dataframes()
assert mp.data == {}
def test_load_dataframes_selective(meta_partitions_files_only, store_session):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, tables=["core"]
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
def test_load_dataframes_columns_projection(
meta_partitions_evaluation_files_only, store_session
):
expected_df = pd.DataFrame(OrderedDict([("P", [1]), ("L", [1]), ("HORIZON", [1])]))
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session, tables=["PRED"], columns={"PRED": ["P", "L", "HORIZON"]}
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["PRED"], expected_df, check_dtype=False)
def test_load_dataframes_columns_raises_missing(
meta_partitions_evaluation_files_only, store_session
):
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(ValueError) as e:
meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session,
tables=["PRED"],
columns={"PRED": ["P", "L", "HORIZON", "foo", "bar"]},
)
assert str(e.value) == "Columns cannot be found in stored dataframe: bar, foo"
def test_load_dataframes_columns_table_missing(
meta_partitions_evaluation_files_only, store_session
):
# test behavior of load_dataframes for columns argument given
# specifying table that doesn't exist
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(
ValueError,
match=r"You are trying to read columns from invalid table\(s\). .*PRED_typo.*",
):
mp.load_dataframes(
store=store_session,
columns={"PRED_typo": ["P", "L", "HORIZON", "foo", "bar"]},
)
# ensure typo in tables argument doesn't raise, as specified in docstring
dfs = mp.load_dataframes(store=store_session, tables=["PRED_typo"])
assert len(dfs) > 0
def test_from_dict():
df = pd.DataFrame({"a": [1]})
dct = {"data": {"core": df}, "label": "test_label"}
meta_partition = MetaPartition.from_dict(dct)
pdt.assert_frame_equal(meta_partition.data["core"], df)
assert meta_partition.metadata_version == DEFAULT_METADATA_VERSION
def test_eq():
df = pd.DataFrame({"a": [1]})
df_same = pd.DataFrame({"a": [1]})
df_other = pd.DataFrame({"a": [2]})
df_diff_col = pd.DataFrame({"b": [1]})
df_diff_type = pd.DataFrame({"b": [1.0]})
meta_partition = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df}}
)
assert meta_partition == meta_partition
meta_partition_same = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_same}}
)
assert meta_partition == meta_partition_same
meta_partition_diff_label = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}}
)
assert meta_partition != meta_partition_diff_label
assert meta_partition_diff_label != meta_partition
meta_partition_diff_files = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}, "files": {"core": "something"}}
)
assert meta_partition != meta_partition_diff_files
assert meta_partition_diff_files != meta_partition
meta_partition_diff_col = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_col}}
)
assert meta_partition != meta_partition_diff_col
assert meta_partition_diff_col != meta_partition
meta_partition_diff_type = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_type}}
)
assert meta_partition != meta_partition_diff_type
assert meta_partition_diff_type != meta_partition
meta_partition_diff_metadata = MetaPartition.from_dict(
{
"label": "test_label",
"data": {"core": df_diff_type},
"dataset_metadata": {"some": "metadata"},
}
)
assert meta_partition != meta_partition_diff_metadata
assert meta_partition_diff_metadata != meta_partition
meta_partition_different_df = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_other}}
)
assert not meta_partition == meta_partition_different_df
meta_partition_different_label = MetaPartition.from_dict(
{"label": "test_label", "data": {"not_core": df_same}}
)
assert not meta_partition == meta_partition_different_label
meta_partition_empty_data = MetaPartition.from_dict(
{"label": "test_label", "data": {}}
)
assert meta_partition_empty_data == meta_partition_empty_data
meta_partition_more_data = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df, "not_core": df}}
)
assert not (meta_partition == meta_partition_more_data)
assert not meta_partition == "abc"
def test_add_nested_to_plain():
mp = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
to_nest = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mp_nested = to_nest[0].add_metapartition(to_nest[1])
mp_add_nested = mp.add_metapartition(mp_nested)
mp_iter = mp.add_metapartition(to_nest[0]).add_metapartition(to_nest[1])
assert mp_add_nested == mp_iter
def test_add_nested_to_nested():
mps1 = [
MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
MetaPartition(
label="label_33",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
]
mpn_1 = mps1[0].add_metapartition(mps1[1])
mps2 = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mpn_2 = mps2[0].add_metapartition(mps2[1])
mp_nested_merge = mpn_1.add_metapartition(mpn_2)
mp_iter = mps1.pop()
for mp_ in [*mps1, *mps2]:
mp_iter = mp_iter.add_metapartition(mp_)
assert mp_nested_merge == mp_iter
def test_eq_nested():
mp_1 = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
)
mp = mp_1.add_metapartition(mp_2)
assert mp == mp
assert mp != mp_2
assert mp_2 != mp
mp_other = MetaPartition(
label="label_3", data={"core": pd.DataFrame({"test": [4, 5, 6]})}
)
mp_other = mp_1.add_metapartition(mp_other)
assert mp != mp_other
assert mp_other != mp
def test_nested_incompatible_meta():
mp = MetaPartition(
label="label_1",
data={"core": pd.DataFrame({"test": np.array([1, 2, 3], dtype=np.int8)})},
metadata_version=4,
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": np.array([4, 5, 6], dtype=np.float64)})},
metadata_version=4,
)
with pytest.raises(ValueError):
mp.add_metapartition(mp_2)
def test_concatenate_no_change():
input_dct = {
"first_0": pd.DataFrame({"A": [1], "B": [1]}),
"second": pd.DataFrame({"A": [3], "B": [3], "C": [3]}),
}
dct = {"label": "test_label", "data": input_dct}
meta_partition = MetaPartition.from_dict(dct)
result = meta_partition.concat_dataframes()
assert result == meta_partition
def test_concatenate_identical_col_df():
input_dct = {
"first_0": pd.DataFrame({"A": [1], "B": [1]}),
"first_1": pd.DataFrame({"A": [2], "B": [2]}),
"second": | pd.DataFrame({"A": [3], "B": [3], "C": [3]}) | pandas.DataFrame |
# @package mktDataAnalysis
# mktDataAnalysis class in charge of creating the needed indicators using the market data
# @author <NAME>
import sys
sys.path.insert(0, r'')
import json
import os
from tradingBot.src.utils.exceptions import BadKwargs, SymbolNotSupported
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
pd.options.mode.chained_assignment = None
class mktDataAnalysis():
## mktDataAnalysis
# @class mktDataAnalysis
paths = {"mainPath": "tradingBot/dataBase/{}/{}", "subPaths": [
{"id": "indicators", "subPath": "/indicators"},
{"id": "intervals", "subPath": "/intervals"}
]}
def __init__(self, coin=None, pair=None, coinBotObj=None):
self.coinBotObj = coinBotObj
self.dBIntervals = coinBotObj.tmfrmVar
mainPath = self.paths['mainPath'].format(coin, pair)
self.indicPath = mainPath + self.paths['subPaths'][0]["subPath"]
self.DBPath = mainPath + self.paths['subPaths'][1]["subPath"]
self.rptIndic = {}
self.indicIntervals = []
self.getIndInterval = []
self.coin = coin
self.pair = pair
#This section will be deleted in future
if not os.path.exists(self.indicPath):
os.makedirs(self.indicPath)
#TODO ACCESS THE ACTUALIZED DB FROM CB
#for nameDB in self.dBIntervals:
# setattr(self, nameDB, getattr(self.coinBotObj, nameDB))
#TODO IF WE ELIMINATE ALL INDICATORS THEN WHY WE OPEN THEM HERE.
#self.openInd()
#self.delAllIndicator()
def newIndicator(self, indicator=None, period=None, interval=None):
# @fn newIndicator
# @brief Adds a new indicator
# @param indicator The name of the indicator. Send string (e.g., "WMA")
        # @param period Period at which the indicator will be working. Send int (e.g., 100)
        # @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
# @exception False If indicator is already created.
if not isinstance(period, int):
return False
interval = self._getIntvl(timeframe=interval)
id = str(period) + indicator + interval
for indicFiles in self.getIndInterval:
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
if line["id"] == id:
self.rptIndic[id] = self.rptIndic[id] + 1 if id in self.rptIndic else 1
return False
"""
if indicator == "RSI" and indicator == \
line['indicator'] and interval == line['interval']:
return False
"""
if not self.checkIntDB(interval=interval):
return False
newInd = {
"indicator": indicator,
"interval": interval,
"period": period,
"id": id,
"start": 0,
"end": 0,
"data": []
}
newInd['data'] = self.actlIndData(indicator=indicator, period=period, interval=interval,\
start=None, end=None, int_unix=None)
if interval not in self.indicIntervals:
self.indicIntervals.append(interval)
if not newInd['data']:
return False
newInd['start'] = newInd['data'][0]['timestamp']
newInd['end'] = newInd['data'][-1]['timestamp']
indic = getattr(self, "indic_" + interval)
indic["indicators"].append(newInd)
setattr(self, "indic_" + interval, indic)
def delIndicator(self, id=None):
# @fn delIndicator
        # @brief Deletes one indicator
# @param id ID of the indicator. Send string (e.g., "80SMA1h")
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
if line["id"] == id:
if id in self.rptIndic:
self.rptIndic[id] = self.rptIndic[id] - 1
if self.rptIndic[id] >= 0:
newInd["indicators"].append(line)
else:
self.rptIndic[id] = -1
if not line["id"] == id:
newInd["indicators"].append(line)
setattr(self, indicFiles["indicator_int"], newInd)
def delAllIndicator(self):
# @fn delAllIndicator
        # @brief Deletes all indicators that we currently have.
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
setattr(self, indicFiles["indicator_int"], newInd)
def actlIndicators(self):
# @fn actlIndicators
        # @brief Updates all indicators across all intervals
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
info = {
"indicator": line['indicator'],
"interval": line['interval'],
"period": line['period'],
"id": line['id'],
"start": line['data'][0]['timestamp'],
"end": line['data'][-1]['timestamp'],
"data": line['data']
}
int_unix = info['data'][1]['timestamp'] - info['data'][0]['timestamp']
newData = self.actlIndData(indicator=info['indicator'], period=info['period'],\
interval=info['interval'], start=info['start'], end=info['end'], int_unix=int_unix)
if newData[0]['timestamp'] == info['end']:
info['data'][-1] = newData[0]
else:
del info['data'][0:len(newData)]
info['data'] += newData
info['start'] = info['data'][0]['timestamp']
info['end'] = info['data'][-1]['timestamp']
newInd["indicators"].append(info)
setattr(self, indicFiles["indicator_int"], newInd)
def actlIndData(self, indicator=None, period=None, interval=None, start=None, end=None, int_unix=None):
        # @fn actlIndData
        # @brief Dispatches the indicator update to its respective calculation function.
        # @param indicator The name of the indicator. Send string (e.g., "WMA")
        # @param period Period at which the indicator will be working. Send int (e.g., 100)
        # @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
        # @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
        # @param end End of indicator in unix format. Send int (e.g., 1618146000000)
        # @param int_unix Interval of indicator expressed in unix format. Send int (e.g., 3600000)
        # @return data Updated data as an array of JSON records.
if "EMA" == indicator:
data = self.indEMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "RSI" == indicator:
data = self.indRSI(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "SMA" == indicator:
data = self.indSMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "WMA" == indicator:
data = self.indWMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "ATR" == indicator:
data = self.indATR(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "BB" == indicator:
data = self.indBB(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
else:
return False
return data
def viewIndicators(self):
# @fn viewIndicators
# @brief View all indicators
        # @return data Prints all indicators to the command line
indica = {"indicators": []}
for indicFiles in self.getIndInterval:
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
newInd = {
"indicator": line['indicator'],
"interval": line['interval'],
"period": line['period'],
"id": line['id'],
}
indica["indicators"].append(newInd)
data = pd.DataFrame.from_dict(indica['indicators'], orient='columns')
data = data.sort_values(by=['interval', 'indicator', 'period'])
data = data.reindex(columns=['interval', 'indicator', 'period', 'id'])
print(data.to_string(index=False))
def indRSI(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indRSI
        # @brief Updates the RSI indicator data for the given parameters.
        # @param period Period at which the indicator will be working. Send int (e.g., 100)
        # @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
        # @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
        # @param end End of indicator in unix format. Send int (e.g., 1618146000000)
        # @param int_unix Interval of indicator expressed in unix format. Send int (e.g., 3600000)
        # @return actData Updated data as an array of JSON records.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief
# @param data
# @param kLines
# @exception
# @return actData
delta = data['close'].diff(1)
delta.dropna(inplace=True)
positive = delta.copy()
negative = delta.copy()
positive[positive < 0] = 0
negative[negative > 0] = 0
average_gain = positive.rolling(window=period).mean()
average_loss = abs(negative.rolling(window=period).mean())
relative_strength = average_gain / average_loss
rsi = 100.0 - (100.0 / (1.0 + relative_strength))
actData = pd.DataFrame()
actData['timestamp'] = data['timestamp']
actData['value'] = rsi
actData = json.loads(actData.to_json(orient="records"))
return actData[kLines:]
data = getattr(self.coinBotObj, interval)
data = pd.DataFrame.from_dict(data, orient='columns')
data["close"] = pd.to_numeric(data["close"])
startDB = data.iloc[0]['timestamp']
endDB = data.iloc[-1]['timestamp']
if int_unix == None or end > data.iloc[-1]['timestamp']:
actData = calcData(data=data, kLines=0)
elif end == endDB:
actData = calcData(data=data[-(period + 1):], kLines=-1)
else:
opData, kLines = self.checkLen(period=period, end=end, endDB=endDB, int_unix=int_unix)
actData = calcData(data=data[opData:], kLines=kLines)
return actData
def indEMA(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indEMA
        # @brief Updates the EMA indicator data for the given parameters.
        # @param period Period at which the indicator will be working. Send int (e.g., 100)
        # @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
        # @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
        # @param end End of indicator in unix format. Send int (e.g., 1618146000000)
        # @param int_unix Interval of indicator expressed in unix format. Send int (e.g., 3600000)
        # @return actData Updated data as an array of JSON records.
def calcData(data=None, kLines=None):
            # @fn calcData
            # @brief Compute the EMA series for the supplied candles.
            # @param data Candle DataFrame with "timestamp" and "close" columns.
            # @param kLines Slice applied to the result (0 keeps everything, -1 keeps only the newest value).
            # @return actData EMA values as an array of JSON objects.
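            # Exponential moving average of the close price with span == period;
            # pandas' ewm handles the smoothing factor.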
ema = data['close'].ewm(span=period, adjust=False).mean()
actData = pd.DataFrame()
actData['timestamp'] = data['timestamp']
actData['value'] = ema
actData = json.loads(actData.to_json(orient="records"))
return actData[kLines:]
data = getattr(self.coinBotObj, interval)
print("My data intvl {} for latest candle: {}".format(interval, data[-1]['timestamp']))
data = | pd.DataFrame.from_dict(data, orient='columns') | pandas.DataFrame.from_dict |
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
import pandas
default_args = {
'owner': 'ODDS',
}
dag = DAG('product_price_pipeline',
default_args=default_args,
start_date=timezone.datetime(2020, 8, 1),
catchup=False)
start = DummyOperator(task_id='start', dag=dag)
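# The two extract tasks below each write an intermediate CSV (UPC/description and UPC/price);
# the final merge task combines those extracts, presumably joining on the shared UPC column.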
def get_product_upc_and_description():
df = pandas.read_csv('products-lookup-table.csv', header=1)
new_df = df[['UPC', 'DESCRIPTION']]
new_df.to_csv('product_upc_and_description.csv', index=False)
get_product_upc_and_description = PythonOperator(
task_id='get_product_upc_and_description',
python_callable=get_product_upc_and_description,
dag=dag
)
def get_upc_and_price():
df = pandas.read_csv('transaction-data-table.csv', header=1)
new_df = df[['UPC', 'PRICE']]
new_df.to_csv('upc_and_price.csv', index=False)
get_upc_and_price = PythonOperator(
task_id='get_upc_and_price',
python_callable=get_upc_and_price,
dag=dag
)
def merge():
df_product_description = | pandas.read_csv('product_upc_and_description.csv') | pandas.read_csv |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pandas as pd
from tqdm import tqdm
from pyserini.query_iterator import get_query_iterator, TopicsFormat
from transformers import BertModel, BertTokenizerFast
import torch
class DkrrDprQueryEncoder():
def __init__(self, encoder: str = None, device: str = 'cpu', prefix: str = "question:"):
self.device = device
self.model = BertModel.from_pretrained(encoder)
self.model.to(self.device)
self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
self.prefix = prefix
@staticmethod
def _mean_pooling(model_output, attention_mask):
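        # Zero out the embeddings of padded positions, then average the remaining token
        # vectors into a single fixed-size query embedding.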
model_output = model_output[0].masked_fill(1 - attention_mask[:, :, None], 0.)
model_output = torch.sum(model_output, dim=1) / torch.clamp(torch.sum(attention_mask, dim=1), min=1e-9)[:, None]
return model_output.flatten()
def encode(self, query: str):
if self.prefix:
query = f'{self.prefix} {query}'
inputs = self.tokenizer(query, return_tensors='pt', max_length=40, padding="max_length")
inputs.to(self.device)
outputs = self.model(input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"])
embeddings = self._mean_pooling(outputs, inputs['attention_mask']).detach().cpu().numpy()
return embeddings.flatten()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--topics', type=str, metavar='topic_name', required=True,
help="Name of topics.")
parser.add_argument('--encoder', type=str, help='encoder name or path',
default='facebook/dpr-question_encoder-multiset-base', required=False)
parser.add_argument('--output', type=str, help='path to store query embeddings', required=True)
parser.add_argument('--device', type=str,
help='device cpu or cuda [cuda:0, cuda:1...]', default='cpu', required=False)
args = parser.parse_args()
query_iterator = get_query_iterator(args.topics, TopicsFormat(TopicsFormat.DEFAULT.value))
topics = query_iterator.topics
encoder = DkrrDprQueryEncoder(args.encoder, args.device)
embeddings = {'id': [], 'text': [], 'embedding': []}
for index, (topic_id, text) in enumerate(tqdm(query_iterator, total=len(topics.keys()))):
embeddings['id'].append(topic_id)
embeddings['text'].append(text)
embeddings['embedding'].append(encoder.encode(text))
embeddings = | pd.DataFrame(embeddings) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 00:04:41 2020
@author: shashanknigam
web parser for amazon:
Things to be extracted: 1. Title of the product span id = "productTitle"
2. Number of rating : span id = acrCustomerReviewText
3. Average rating given:span class a-icon-alt
4. Description: div id = featurebullets_feature_div.text
5. Product description: heading description format h3:a-spacing-mini :- neighboring text p class="a-spacing-base"
6. Other features if any h4 class="a-spacing-mini" p : afterwards.
-- later consideration 6.5: Comparison id=HLCXComparisonTable
item heading: tr class="comparison_table_image_row"
img.src :Name
class="a-row a-spacing-top-small"
7. Product information div id = "productDetails_detailBullets_sections1"
1. Product dimensions th label td value
2. Item weight
3. Shipping weight
4. Manufacturer
5. ASIN
6. Model Number
7. Customer reviews
8. Best sellers rank
9. Warantee if any
8. Question answers: div =class="a-section a-spacing-none askBtfTopQuestionsContainer" ; span class = "a-text-bold" next sibling id (class="a-declarative")the child question next span class= askLongText class="a-color-tertiary a-nowrap" for r the next teritory wrap
9. Customer reviews: all if possible : - class="cr-lighthouse-term " (terms)
1. data-hook="review-star-rating" user rating
2. data-hook="review-title"
3. class="a-row a-spacing-small review-data" detailed review
4. data-hook="see-all-reviews-link-foot"
5. class="a-last"
10. Price: span id = priceblock_ourprice
Hanumanji
a-section celwidget
cr-dp-lighthut
["a-fixed-left-grid","a-spacing-base"]
['a-fixed-left-grid-col', 'a-col-right']
reviews-medley-footer
id="cr-dp-desktop-lighthut"
["a-fixed-right-grid-col","cm_cr_grid_center_right"]
"""
"""
Getting each details out:
"""
from selenium import webdriver
import time
from bs4 import BeautifulSoup as soup
import bs4
import sys
import traceback
import numpy as np
import pandas as pd
import gc
product_dict={"ASIN":[],"Name":[]}
productDetails = {"ASIN":[],"Name":[],"Average Rating":[],"TotalRating":[],"Price":[],"Features":[]}
Description = {"ASIN":[],"ShortDescription":[],"LongDescription":[]}
productReview = {"ASIN":[],"Date":[],"Rating":[],"Title":[],"Detail":[]}
productQA = {"ASIN":[],"Question":[],"Answer":[]}
productInformation={"ASIN":[]} #Rest of the fields are optional
productRating={"ASIN":[],"5":[],"4":[],"3":[],"2":[],"1":[]}
ASIN=""
failed = []
#QA= {"Question":[],"Answers":[],"ASIN":[]}
#customerReviews = {"ASIN":[],"UserRating":[],"Title":[],"detailedReview":[]}
pages=0
driver = 0
ASIN_LIST = []
def initASIN_LIST():
global ASIN_LIST
df = pd.read_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDictionary.xlsx')
ASIN_LIST = list(df['ASIN'])
def readWebpage(url,driver_not_in_use=-1):
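    # Fetch the raw page source with Selenium, randomly alternating between the Safari and
    # Chrome drivers (optionally avoiding the driver that just failed) and retrying once on error.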
try:
global pages
global driver
driver = np.random.randint(0,2)
while driver==driver_not_in_use:
driver = np.random.randint(0,2)
if driver ==0:
browser = webdriver.Safari()
elif driver==1:
browser = webdriver.Chrome('/Users/shashanknigam/Downloads/Beautiful Soup/chromedriver')
#elif driver==2:
# browser=webdriver.Firefox('/Users/shashanknigam/Downloads/Beautiful Soup/')
browser.get(url)
contents = browser.page_source
#time.sleep(1)
browser.close()
del browser
return contents
except:
try:
driver = np.random.randint(0,2)
if driver ==0:
browser = webdriver.Safari()
elif driver==1:
browser = webdriver.Chrome('/Users/shashanknigam/Downloads/Beautiful Soup/chromedriver')
#elif driver==2:
# browser=webdriver.Firefox('/Users/shashanknigam/Downloads/Beautiful Soup/')
            browser.get(url)
            contents = browser.page_source
            browser.close()
            del browser
            return contents
except:
print(sys.exc_info())
print(traceback.format_exc())
return None
#time.sleep(10)
def getSoup(url):
global driver
w = readWebpage(url)
if w is not None:
s = soup(w,'html.parser')
while "Robot Check" in s.text:
w = readWebpage(url,driver)
s = soup(w,'html.parser')
else:
s=None
return s
def get(s,tag,attr=None):
if attr is None:
return s.find_all(tag)
else:
#print("searching for attribute:"+attr)
tags = s.find_all(tag)
return [t for t in tags if attr in t.attrs.keys()]
def getNextSibling(tag):
while True:
if tag.next_sibling == '' or tag.next_sibling is None:
return None
elif tag.next_sibling in ['\n','\xa0'] or tag.next_sibling.name=='br':
tag = tag.next_sibling
else:
return tag.next_sibling
def getNextSiblingText(tag):
while True:
#print(tag)
if tag.next_sibling == '' or tag.next_sibling is None:
return ''
elif tag.next_sibling in ['\n','\xa0'] or tag.next_sibling.name=='br' or tag.next_sibling==' ':
tag = tag.next_sibling
else:
if isinstance(tag.next_sibling,bs4.element.Tag):
return tag.next_sibling.text
else:
return str(tag.next_sibling)
def parseQA(url,QA,ASIN):
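    # Scrape the question/answer pairs on one Amazon Q&A page into the QA dict and return it
    # together with the URL of the next page ('' when there is none).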
s=getSoup(url)
if s is not None:
s_div = get(s,'div','class')
qa_div = [q for q in s_div if q['class']==['celwidget']]
if len(qa_div)>1:
qa_div = qa_div[1]
elif len(qa_div)==1:
qa_div = qa_div[0]
else:
qa_div=None
if qa_div is not None:
qa=get(qa_div,'div','class')
qa_inner = [q for q in qa if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
#print("qa_inner",len(qa_inner))
for i in qa_inner:
qa_inner_temp=get(i,'div','class')
qa_inner_inner=[q for q in qa_inner_temp if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
#print(len(qa_inner_inner))
if len(qa_inner_inner)>1:
QA['ASIN'].append(ASIN)
QA['Question'].append(qa_inner_inner[0].text.strip())
QA['Answer'].append(qa_inner_inner[1].span.text.strip())
#QA[qa_inner_inner[0].text.strip()]=qa_inner_inner[1].span.text.strip()
elif len(qa_inner_inner)==1:
#print(qa_inner_inner)
QA['ASIN'].append(ASIN)
QA['Question'].append(qa_inner_inner[0].text.strip())
QA['Answer'].append('')
#QA[qa_inner_inner[0].text.strip()]=''
li = get(s,'li','class')
li_last = [l for l in li if l['class']==['a-last']]
next_url = ""
if len(li_last)!=0:
if 'https://www.amazon.com/' not in li_last[0].a['href']:
next_url='https://www.amazon.com/'+li_last[0].a['href']
else:
next_url= li_last[0].a['href']
else:
next_url=""
s.decompose()
else:
next_url=""
return QA,next_url
def parseReview(url,review,ASIN):
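    # Scrape rating, date, title and body for every review on one review page into `review`,
    # returning the next-page URL (None once pagination ends or parsing fails).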
#cm_cr-review_list
s=getSoup(url)
if s is not None:
s_div = get(s,'div','id')
div_reviews = [d for d in s_div if d['id']=="cm_cr-review_list"]
if len(div_reviews)>0:
div_reviews=div_reviews[0]
div_review = get(div_reviews,"div","data-hook")
div_r = [r for r in div_review if r['data-hook']=='review']
for i in div_r:
try:
rating_i = get(i,'i','data-hook')
rating = [r for r in rating_i if r['data-hook']=="review-star-rating"]
rating = rating[0].text.strip()
span_d = get(i,'span','data-hook')
date = [d for d in span_d if d['data-hook']=="review-date"]
date = date[0].text.strip()
review_t = get(i,'a','data-hook')
review_title=[t for t in review_t if t['data-hook']=="review-title"]
review_title = review_title[0].text.strip()
review_b=[b for b in span_d if b['data-hook']=="review-body"]
review_b = review_b[0].text.strip()
review["ASIN"].append(ASIN)
review["Rating"].append(rating)
review["Date"].append(date)
review["Title"].append(review_title)
review["Body"].append(review_b)
except:
print(sys.exc_info())
print(traceback.format_exc())
pass
li = get(s,'li','class')
next_url = [l for l in li if l['class']==["a-last"]]
if len(next_url)>0:
url ='https://www.amazon.com'+next_url[0].a['href']
else:
print("Error")
url=None
else:
url=None
s.decompose()
else:
url=None
#span
# data-hook = "review-date"
# i data-hook "review-star-rating"
# span data-hook "review-title"
#a-section review aok-relative
return url,review
def appendExcel(filename,df1):
df = | pd.read_excel(filename,index_col=0) | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve and Process city neighbourhood data."""
# pylint: disable=invalid-name,dangerous-default-value
# pylint: disable=logging-fstring-interpolation
from typing import Dict
import geopandas as gpd
import pandas as pd
import pandera as pa
import prefect
import requests
neigh_demog_schema = pa.DataFrameSchema(
columns={
"name": pa.Column( | pd.StringDtype() | pandas.StringDtype |
import re
import itertools
import bokeh.plotting as plotting
from bokeh.models import Legend, ColumnDataSource, HoverTool, Div
import bokeh.transform as transform
import bokeh.layouts as layouts
from bokeh.palettes import Spectral6, Spectral11
import pandas as pd
def create_data_frame(data):
"""
Create a Bokeh ColumnDataSource from an Astropy Table containing results from
a navostats query.
Parameters
----------
data : astropy.table.Table
A table presumed to contain the results from a query on navostats.
In particular, the following columns must be present: location, start_time,
do_query_dur, stream_to_file_dur, num_rows, base_name, service_type, ra, dec, sr
Returns
-------
`~pandas.core.frame.DataFrame`
A Pandas data frame suitable for plotting most quantities.
"""
# Masked values in integer columns show up as <NA> when exported to Pandas.
# Such values seem to cause weird errors when displayed in bokeh, even when those rows
# are filtered out with dropna(). So convert the column to float, which results
# in masked values being NaN (np.nan) which are smoothly ignored by bokeh.
data["num_rows"] = data["num_rows"].astype(float)
data["size"] = data["size"].astype(float)
# This one gives us one value for the whole day which is something we can group on.
data['datestr'] = data['start_time'].astype('U10')
# Create the Pandas DataFrame, with start time officially being a datetime.
df = data["location", "start_time", "datestr", "do_query_dur", "query_total_dur", "stream_to_file_dur",
"size", "num_rows", "base_name", "service_type", "ra", "dec", "sr"].to_pandas().copy()
df["dt_start_time"] = pd.to_datetime(df["start_time"], format='%Y-%m-%d %H:%M:%S.%f')
# A datetime version of the date rounded to a whole day.
df["date"] = | pd.to_datetime(df["datestr"], format='%Y-%m-%d') | pandas.to_datetime |
from operator import mul
import sys
import matplotlib.pyplot as plt
import numpy as np
from holoviews import opts
from scipy.signal.ltisys import dfreqresp
from scipy.spatial import Voronoi
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
import pandas as pd
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual, Text, interactive_output
from ipywidgets import Button, HBox, VBox,Layout,Label
import panel as pn
import seaborn as sns
from kneed import KneeLocator
from PatientGraphPheno import *
from patientKG.config.bedrock_connection import *
from patientKG import utils_pickle  # needed for the bare utils_pickle.read(...) calls below
import patientKG.utils_pickle
from holoviews.operation.datashader import datashade, bundle_graph
import holoviews as hv
from holoviews import opts
from datetime import datetime
import re
import plotly.graph_objects as go
from pivottablejs import pivot_ui
from IPython.display import display, HTML
from sklearn.feature_selection import VarianceThreshold
from sklearn import preprocessing
import urllib, json
sns.set(style="ticks")
hv.extension('bokeh')
defaults = dict(width=1000, height=1000, padding=0.1)
from patientKG.tests.test_graphs import *
from ipywidgets import TwoByTwoLayout
import itertools
import time
from IPython.display import IFrame
import json, io
from patientKG.priorKnowledge.Hb1AC import *
from patientKG.priorKnowledge.Albumin import *
from patientKG.priorKnowledge.FBC import *
from patientKG.priorKnowledge.Creactive import *
from scipy.stats import chi2_contingency
import scipy.stats as stats
def show_SpellHRG_HRG_Table(HRG,Degree,Readmit):
Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
return Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == HRG)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
#This below block is for Jupyter-Notebook
"""stats = interact(PatientGraphVisuExplore.show_SpellHRG_HRG_Table,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Income_Scatter(HRG,Degree,Readmit):
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == HRG)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
plt.scatter(data['Sum_Degree'], data['INCOME'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Income_Scatter,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_LOS_Scatter(HRG,Degree,Readmit):
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
plt.scatter(data['Sum_Degree'], data['Total_LOS'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_LOS_Scatter,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Pairplot(HRG,Degree,Readmit):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
sns.pairplot(df[df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE','ReAdmitted in DAYS'])], hue="SpellHRG")
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Pairplot,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_ICD(HRG,ICD,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
if ICD == None:
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
else:
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellPDiag'] == ICD)&(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
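    # Reduce the selected spell-level features to two principal components, cluster them with
    # k-means and shade the clusters' Voronoi cells so individual spells can be compared visually.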
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME']#,'Turnaround_Degree','DIAG_COUNT']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
#y = test.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
#Voronoi at least four points, though clusters not less than 4
kmeans = KMeans(n_clusters=2)
#pair = ['INCOME','Total_LOS']
kmeans.fit(principalDf)
labels = kmeans.predict(principalDf)
centroids = kmeans.cluster_centers_
#print(centroids)
v = np.vstack([centroids,[0,0]])
#print(v)
vor = Voronoi(principalComponents)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
fig = plt.figure(figsize=(10, 10))
colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
size = {1:2,2:2,3:2,4:2}
colors = list(map(lambda x: colmap[x+1], labels))
markers = list(map(lambda x: marker[x+1], labels))
sizes = list(map(lambda x: size[x+1], labels))
#print(principalComponents)
df['principal component 1'] = principalComponents[:,0]
df['principal component 2'] = principalComponents[:,1]
df['color'] = colors
df['marker'] = markers
df['sizes'] = sizes
opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
data ={'x':list(df['principal component 1'])
,'y':list(df['principal component 2'])
,'color':list(df['color'])
,'marker':list(df['marker'])
,'sizes':list(df['sizes'])}
#hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
#for idx, centroid in enumerate(centroids):
#plt.scatter(*centroid, color=colmap[idx+1])
df['labels'] = labels
#print(list(df['labels'].unique()))
shape_ = {}
for item in list(df['labels'].unique()):
shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
#print(shape_)
#print(sorted(shape_.items(), key=lambda x: x[1]))
minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
#sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
#for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
label = label
plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
test=zip(regions, df['color'])
for item in test:
polygon = vertices[item[0]]
#print(region,polygon)
#print(*zip(*polygon))
plt.fill(*zip(*polygon), alpha=0.4
,color=item[1]
)
plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
#df.loc[(df['POD_CODE'] == POD)]
return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','age','sex','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','SpellPDiag','SpellSDiag']]#,'ALL_DIAG']]
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG_ICD,
HRG=widgets.SelectMultiple(
options=
init_code,
#list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
),
ICD=widgets.Dropdown(
options=
#init_code,
sorted(list(Degree_HRG['SpellPDiag'].dropna().unique())),value=None
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_ICD(ICD,Degree,Age,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in ICD:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellPDiag'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['age'].astype(int)>=Age[0])&(Degree_ReAdmitted_HRG['age'].astype(int) <=Age[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME']#,'Turnaround_Degree','DIAG_COUNT']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
#y = test.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
#Voronoi at least four points, though clusters not less than 4
kmeans = KMeans(n_clusters=2)
#pair = ['INCOME','Total_LOS']
kmeans.fit(principalDf)
labels = kmeans.predict(principalDf)
centroids = kmeans.cluster_centers_
#print(centroids)
v = np.vstack([centroids,[0,0]])
#print(v)
vor = Voronoi(principalComponents)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
fig = plt.figure(figsize=(10, 10))
colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
size = {1:2,2:2,3:2,4:2}
colors = list(map(lambda x: colmap[x+1], labels))
markers = list(map(lambda x: marker[x+1], labels))
sizes = list(map(lambda x: size[x+1], labels))
#print(principalComponents)
df['principal component 1'] = principalComponents[:,0]
df['principal component 2'] = principalComponents[:,1]
df['color'] = colors
df['marker'] = markers
df['sizes'] = sizes
opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
data ={'x':list(df['principal component 1'])
,'y':list(df['principal component 2'])
,'color':list(df['color'])
,'marker':list(df['marker'])
,'sizes':list(df['sizes'])}
#hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
#for idx, centroid in enumerate(centroids):
#plt.scatter(*centroid, color=colmap[idx+1])
df['labels'] = labels
#print(list(df['labels'].unique()))
shape_ = {}
for item in list(df['labels'].unique()):
shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
#print(shape_)
#print(sorted(shape_.items(), key=lambda x: x[1]))
minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
#sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
#for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
label = label
plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
test=zip(regions, df['color'])
for item in test:
polygon = vertices[item[0]]
#print(region,polygon)
#print(*zip(*polygon))
plt.fill(*zip(*polygon), alpha=0.4
,color=item[1]
)
plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
#df.loc[(df['POD_CODE'] == POD)]
return df[(df['POD_CODE'] == POD)][['age','sex','SpellHRG']+features+ ['POD_CODE','SpellPDiag','SpellSDiag']]#,'ALL_DIAG']]
#This block is for Jupyter-Notebook script
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_ICD,
ICD=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellPDiag'].dropna().unique()),
value=['A415'],
#rows=10,
description='ICD',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Age=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=100,
step=1,
description='Age:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')) """
def plot_SpellHRG_HRG(HRG,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
#y = test.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
#Voronoi at least four points, though clusters not less than 4
kmeans = KMeans(n_clusters=2)
#pair = ['INCOME','Total_LOS']
kmeans.fit(principalDf)
labels = kmeans.predict(principalDf)
centroids = kmeans.cluster_centers_
#print(centroids)
v = np.vstack([centroids,[0,0]])
#print(v)
vor = Voronoi(principalComponents)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
fig = plt.figure(figsize=(10, 10))
colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
size = {1:2,2:2,3:2,4:2}
colors = list(map(lambda x: colmap[x+1], labels))
markers = list(map(lambda x: marker[x+1], labels))
sizes = list(map(lambda x: size[x+1], labels))
#print(principalComponents)
df['principal component 1'] = principalComponents[:,0]
df['principal component 2'] = principalComponents[:,1]
df['color'] = colors
df['marker'] = markers
df['sizes'] = sizes
opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
data ={'x':list(df['principal component 1'])
,'y':list(df['principal component 2'])
,'color':list(df['color'])
,'marker':list(df['marker'])
,'sizes':list(df['sizes'])}
#hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
#for idx, centroid in enumerate(centroids):
#plt.scatter(*centroid, color=colmap[idx+1])
df['labels'] = labels
#print(list(df['labels'].unique()))
shape_ = {}
for item in list(df['labels'].unique()):
shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
#print(shape_)
#print(sorted(shape_.items(), key=lambda x: x[1]))
minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
#sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
#for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
label = label
plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
test=zip(regions, df['color'])
for item in test:
polygon = vertices[item[0]]
#print(region,polygon)
#print(*zip(*polygon))
plt.fill(*zip(*polygon), alpha=0.4
,color=item[1]
)
plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
#df.loc[(df['POD_CODE'] == POD)]
return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG']]
#The below block is for Jupyter-Notebook
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG,
HRG=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_Degree(HRG,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
Degree_ReAdmitted_HRG = Degree_ReAdmitted_HRG[Degree_ReAdmitted_HRG['SpellHRG'].notna()]
Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] = Degree_ReAdmitted_HRG['ReAdmitted in DAYS'].fillna(-1)
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
principalComponents = sliced_principle_components(df,features,2)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
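    # Choose the number of clusters with an elbow-style heuristic (best_eblow_k, provided by one
    # of the wildcard imports above) instead of hard-coding k as the other plotting helpers do.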
kmax = 10
best_n = best_eblow_k(principalDf.values.tolist(),kmax = 10)
df = plot_vor(df,principalComponents, best_n)
return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG_Degree,
HRG=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_Degree_PairCompare(HRG,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("../Degree_ReAdmitted_HRG")
Degree_ReAdmitted_HRG = Degree_ReAdmitted_HRG[Degree_ReAdmitted_HRG['SpellHRG'].notna()]
Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] = Degree_ReAdmitted_HRG['ReAdmitted in DAYS'].fillna(-1)
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = | pd.concat([df,data]) | pandas.concat |
import pandas as pd
import pandas_datareader.data as web
import datetime as dt
import math
import matplotlib.pyplot as plt
import mplfinance as mpf
import seaborn as sns
from openpyxl import Workbook, load_workbook
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font
pd.set_option('display.max_rows',10000,'display.max_columns',10000)
# Create Dataframes
df14 = pd.DataFrame()
df15 = pd.DataFrame()
df16 = pd.DataFrame()
df17 = pd.DataFrame()
df18 = pd.DataFrame()
df19 = | pd.DataFrame() | pandas.DataFrame |
import re
import pandas as pd
import numpy as np
def parse_lat_lon(val):
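    # Accepted layouts: DMS such as 12°34’56” N, 98°45’6” W; decimal degrees with N/S/E/W
    # suffixes; a bare "lat, lon" pair; and the special "<lon>'W, 37N" form. Note the sign
    # convention used below: S and E are negated, matching the rest of this script.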
m = re.match(r'(\d+)°(\d+)’(\d+)” ([NS]), (\d+)°(\d+)’(\d+)” ?([WE])', val)
if m:
lat1,lat2,lat3,latdir,lon1,lon2,lon3,londir = m.groups()
lat = int(lat1)+int(lat2)/60. + int(lat3)/3600.
if latdir == 'S':
lat *= -1
lon = int(lon1)+int(lon2)/60. + int(lon3)/3600.
if londir == 'E':
lon *= -1
return pd.Series({'latitude': lat, 'longitude': lon})
m = re.match(r'^(\d+\.\d+) ?([NS]),? *(-?\d+\.\d+) ?([WE])$', val)
if m:
lat,latdir,lon,londir = m.groups()
lat = float(lat)
if latdir == 'S':
lat = -lat
lon = float(lon)
if londir == 'E':
lon = -lon
return pd.Series({'latitude': lat, 'longitude': lon})
m = re.match(r'^(\d+\.\d+), *(-?\d+\.\d+)$', val)
if m:
lat,lon = m.groups()
lat = float(lat)
lon = float(lon)
return pd.Series({'latitude': lat, 'longitude': lon})
m = re.match(r'^(\d+)\'W, 37N', val)
if m:
lat = 37
lon = float(m.group(1))
return pd.Series({'latitude': lat, 'longitude': lon})
raise ValueError(f'Could not parse {val}')
compl = pd.read_table('compl_geo_cntr.tsv', index_col=0)
biome = pd.read_table('biome.txt', index_col=0, squeeze=True)
meta = pd.read_table('/g/scb2/bork/coelho/DD_DeCaF/genecats.cold/selected-cleaned-metadata-100.tsv', index_col=0)
fullmeta = | pd.read_table('/home/luispedro/work/genecats/sources/fetch-data/data/sample-meta.tsv', index_col=0) | pandas.read_table |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session= | pd.Timestamp("2006-01-06", tz='UTC') | pandas.Timestamp |
"""
:mod:`pandas.io.xml` is a module for reading XML.
"""
from __future__ import annotations
import io
from pandas._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
AbstractMethodError,
ParserError,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.common import is_list_like
from pandas.core.frame import DataFrame
from pandas.core.shared_docs import _shared_docs
from pandas.io.common import (
file_exists,
get_handle,
is_fsspec_url,
is_url,
stringify_path,
)
from pandas.io.parsers import TextParser
class _XMLFrameParser:
"""
Internal subclass to parse XML into DataFrames.
Parameters
----------
path_or_buffer : a valid JSON str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file.
xpath : str or regex
The XPath expression to parse required set of nodes for
migration to `Data Frame`. `etree` supports limited XPath.
    namespaces : dict
        The namespaces defined in the XML document (``xmlns:namespace='URI'``)
        as dicts with key being namespace and value the URI.
elems_only : bool
Parse only the child elements at the specified `xpath`.
attrs_only : bool
Parse only the attributes at the specified `xpath`.
names : list
Column names for Data Frame of parsed XML data.
encoding : str
Encoding of xml object or document.
stylesheet : str or file-like
URL, file, file-like object, or a raw string containing XSLT,
`etree` does not support XSLT but retained for consistency.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
Compression type for on-the-fly decompression of on-disk data.
If 'infer', then use extension for gzip, bz2, zip or xz.
storage_options : dict, optional
Extra options that make sense for a particular storage connection,
e.g. host, port, username, password, etc.,
See also
--------
pandas.io.xml._EtreeFrameParser
pandas.io.xml._LxmlFrameParser
Notes
-----
To subclass this class effectively you must override the following methods:`
* :func:`parse_data`
* :func:`_parse_nodes`
* :func:`_parse_doc`
* :func:`_validate_names`
* :func:`_validate_path`
See each method's respective documentation for details on their
functionality.
"""
def __init__(
self,
path_or_buffer,
xpath,
namespaces,
elems_only,
attrs_only,
names,
encoding,
stylesheet,
compression,
storage_options,
) -> None:
self.path_or_buffer = path_or_buffer
self.xpath = xpath
self.namespaces = namespaces
self.elems_only = elems_only
self.attrs_only = attrs_only
self.names = names
self.encoding = encoding
self.stylesheet = stylesheet
self.is_style = None
self.compression = compression
self.storage_options = storage_options
def parse_data(self) -> list[dict[str, str | None]]:
"""
Parse xml data.
This method will call the other internal methods to
validate xpath, names, parse and return specific nodes.
"""
raise | AbstractMethodError(self) | pandas.errors.AbstractMethodError |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DCC model
=========
"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
import scipy.linalg as scl
import scipy.optimize as sco
from arch import arch_model
from .param_dcc import ParamDCC
from .data_dcc import DataDCC
from .arch_forecast import garch_forecast
from .dcc_recursion import (dcc_recursion_python, dcc_recursion_numba,
corr_dcc_python, corr_dcc_numba)
__all__ = ['DCC']
class DCC(object):
"""DCC model.
Attributes
----------
Methods
-------
"""
def __init__(self, ret=None):
"""Initialize the model.
"""
self.param = None
self.data = DataDCC(ret=ret)
@staticmethod
def simulate(nobs=2000, ndim=3, persistence=.99, beta=.85,
volmean=.2, acorr=.15, bcorr=.8, rho=.9, error=None):
"""Simulate returns and (co)variances.
Parameters
----------
Returns
-------
"""
alpha = persistence - beta
hvar = np.zeros((nobs+1, ndim, ndim))
rho_series = np.ones(nobs+1)
dvec = np.ones(ndim) * volmean
corr_target = (1 - rho) * np.eye(ndim) \
+ rho * np.ones((ndim, ndim))
qmat = corr_target.copy()
ret = np.zeros((nobs+1, ndim))
mean, cov = np.zeros(ndim), np.eye(ndim)
if error is None:
error = np.random.multivariate_normal(mean, cov, nobs+1)
error = (error - error.mean(0)) / error.std(0)
qeta = np.zeros(ndim)
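        # Joint recursion: a GARCH(1,1) update of each asset's variance (dvec) plus a DCC update
        # of the quasi-correlation matrix qmat, which is rescaled to a proper correlation matrix
        # at every step before building the covariance hvar and the next return draw.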
for t in range(1, nobs+1):
dvec = volmean * (1 - persistence) \
+ alpha * ret[t-1]**2 + beta * dvec
qmat = corr_target * (1 - acorr - bcorr) \
+ acorr * qeta[:, np.newaxis] * qeta \
+ bcorr * qmat
qdiag = np.diag(qmat) ** .5
corr_dcc = (1 / qdiag[:, np.newaxis] / qdiag) * qmat
rho_series[t] = (corr_dcc.sum() - ndim) / (ndim - 1) / ndim
hvar[t] = (dvec[:, np.newaxis] * dvec)**.5 * corr_dcc
ret[t] = error[t].dot(scl.cholesky(hvar[t], 0))
qeta = qdiag * ret[t] / dvec**.5
return | pd.DataFrame(ret[1:]) | pandas.DataFrame |
import pandas
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
def evaluate_components(clf, x, y, n_iterations=500, check = 100,
evaluate = True, plot = True, thr = 0.95,
metric=None, random_state=123):
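    # Permutation test for the latent components: fit once on the true x/y pairing, then refit
    # n_iterations times with the rows of x shuffled to build a null distribution of per-component
    # scores (squared Pearson correlation of the x/y scores unless a custom metric is given).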
    if not isinstance(x, pandas.DataFrame):
        x = pandas.DataFrame(x)
# fit model
clf.fit(x,y)
n_comps = clf.n_components
# prepare output
results = pandas.DataFrame(index = range(n_comps * (n_iterations+1)),
columns = ['score', 'component', 'model'])
results.loc[:,'component'] = list(range(n_comps))*(n_iterations+1)
results.loc[range(n_comps),'model'] = ['True']*n_comps
results.loc[range(n_comps,n_comps*(n_iterations+1)), 'model'
] = ['Null']*(n_comps*n_iterations)
if not metric:
true_scores = [stats.pearsonr(clf.x_scores_[:,x], clf.y_scores_[:,x]
)[0]**2 for x in range(n_comps)]
else:
true_scores = [metric(clf.x_scores_[:,x], clf.y_scores_[:,x]
) for x in range(n_comps)]
results.loc[results[results.model=='True'].index,'score'] = true_scores
k = clf.n_components
# permute and refit model
rs = np.random.RandomState(random_state)
x.index = range(len(x.index))
for i in range(n_iterations):
new_ind = rs.permutation(x.index)
new_x = x.iloc[new_ind]
newmod = clf.fit(new_x,y)
if not metric:
new_scores = [stats.pearsonr(newmod.x_scores_[:,x],
newmod.y_scores_[:,x]
)[0]**2 for x in range(n_comps)]
else:
new_scores = [metric(newmod.x_scores_[:,x], newmod.y_scores_[:,x]
) for x in range(n_comps)]
results.loc[range(k, k+n_comps), 'score'] = new_scores
if check:
if i % check == 0:
print('finished iteration',i)
k += n_comps
if evaluate:
if plot:
cr = display_results(results, thr)
else:
cr = display_results(results, thr, False)
return results, cr
def display_results(results, thr = 0.95, plot=True):
if plot:
# plot components
sns.set_context('paper')
plt.close()
sns.catplot(x='component', y = 'score', hue='model', data=results,kind='point')
plt.show()
# get p-values
comp_results = pandas.DataFrame(index=results.component.unique(),
columns = ['r','p','sig'])
for i in results.component.unique():
nullz = results[(results.component==i) & (results.model=='Null')
]['score'].sort_values().values
real = results[(results.component==i) & (results.model=='True')]['score'].values[0]
comp_results.loc[i,'r'] = real
p = (len(nullz[nullz>real])+1) / len(nullz)
if p < (1 - thr):
comp_results.loc[i,['p','sig']] = [p, 1]
print('component %s: p = %s ***'%(i,p))
else:
comp_results.loc[i,['p','sig']] = [p, 0]
print('component %s: p = %s'%(i,p))
return comp_results
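

# Illustrative usage sketch (assumes scikit-learn is installed and that the
# estimator exposes n_components, x_scores_ and y_scores_ after fitting, as
# sklearn's PLSRegression does in recent versions):
def _example_evaluate_components():
    from sklearn.cross_decomposition import PLSRegression
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 10))
    y = rng.normal(size=(100, 2))
    pls = PLSRegression(n_components=2)
    # 50 permutations keeps the example fast; use more in practice
    results, comp_results = evaluate_components(pls, X, y, n_iterations=50,
                                                check=0, plot=False)
    return results, comp_results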
def bootstrap_features(clf, fit_model, X, y, n_iterations=500, check = 100, on ='x'):
if type(X) != type(pandas.DataFrame()):
X = pandas.DataFrame(X)
    if type(y) != type(pandas.DataFrame()):
        y = pandas.DataFrame(y)
import sys
import numpy as np
import pandas as pd
np.random.seed(2019)
from src import Evaluation
def postprocess(model_name):
if model_name == 'CIN':
submission_result = 'submission/nn_pred_CIN_test.csv'
else:
submission_result = 'submission/lgb_pred_test.csv'
    predict_label = pd.read_csv(submission_result, sep='\t', names=['id', 'preds'])
import requests
import pandas as pd
import os
def new_retail_data(base_url='https://python.zgulde.net'):
'''
This function acquires new retail data, returns three dataframes, and saves those dataframes to .csv files.
'''
# Acquiring items data
response = requests.get('https://python.zgulde.net/api/v1/items')
data = response.json()
df = pd.DataFrame(data['payload']['items'])
while data['payload']['next_page'] != None:
response = requests.get(base_url + data['payload']['next_page'])
data = response.json()
        df = pd.concat([df, pd.DataFrame(data['payload']['items'])])
# Importing the libraries
import math
import nsepy
import numpy
import pandas as pd
import sklearn
import importlib
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import date
from scipy.stats import norm
from nsepy import get_history
from matplotlib.pyplot import figure
# Declaring neccessary global variables
global nifty500
global numberofsamples
global mean
global latest
global standard_deviation
global terminals
global confidence_intervals
# Determine Normal Distribution of Index
def normal_distribution_of_pe_ratio(symbol) :
# Referencing global variables for using them in local scope
global nifty500
global numberofsamples
global mean
global latest
global standard_deviation
global terminals
global confidence_intervals
# Downloading the most recent data from NSE
print("")
print('Wait while we download data starting from 2000 from NSE server...\n')
nifty500 = nsepy.get_index_pe_history(symbol, date(2000, 1, 1), date.today())
print("PE Based SIP Model Thesis : ")
print("")
print("1. Continue monthly SIP of Rs. 10,000 when NIFTY 500 PE < R2")
print("2. Sell entire holdings when NIFTY 500 PE > R2")
print("3. Buy stock back with all available cash as soon as NIFTY 500 PE < R2")
print("")
# Calculating the most critical data
numberofsamples = len(nifty500)
mean = nifty500['P/E'].mean()
latest = nifty500['P/E'].iloc[-1]
standard_deviation = nifty500['P/E'].std()
terminals = numpy.array([nifty500['P/E'].min(), nifty500['P/E'].max()])
confidence_intervals = numpy.array([mean - 3*standard_deviation, mean - 2*standard_deviation, mean - standard_deviation, mean + standard_deviation, mean + 2*standard_deviation, mean + 3*standard_deviation])
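
# Illustrative usage sketch (the symbol string must match nsepy's index naming,
# e.g. 'NIFTY 500'); the call populates the module-level globals used by
# pe_based_sip_model() below:
#
#     normal_distribution_of_pe_ratio('NIFTY 500')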
# PE Based SIP Model
def pe_based_sip_model (company) :
# Referencing global variables for using them in local scope
global nifty500
global numberofsamples
global mean
global latest
global standard_deviation
global terminals
global confidence_intervals
# Downloading the most recent data from NSE
stock = get_history(symbol = company, start = date(2000, 1, 1), end = date.today())
# Declaring local variables
row_num = 0 # Row Iterator
current_profit = 0.00 # Current Profit
cash_available = 0.00 # Initial balance
total_num_of_stocks = 0 # Number of stocks
total_invested_amount = 0.00 # Initial Invested Amount
current_portfolio_value = 0.00 # Current Portfolio Value
ledger = pd.DataFrame(columns = ['Type', 'Symbol', 'Price', 'Unit', 'Cash Available'])
    results = pd.DataFrame(columns = ['Total Investment', 'Cash in Hand', 'Current Portfolio Value', 'Net Profit'])
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionBitwiseAndTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_bitwise_and_scalar(self):
self.assertEqual(dnp.bitwise_and(1, 4), 0)
self.assertEqual(np.bitwise_and(1, 4), 0)
# self.assertEqual(dnp.bitwise_and(1, 4), np.bitwise_and(1, 4))
self.assertEqual(dnp.bitwise_and(1, -5), 1)
self.assertEqual(np.bitwise_and(1, -5), 1)
# self.assertEqual(dnp.bitwise_and(1, -5), np.bitwise_and(1, -5))
self.assertEqual(dnp.bitwise_and(0, 9), 0)
self.assertEqual(np.bitwise_and(0, 9), 0)
# self.assertEqual(dnp.bitwise_and(0, 9), np.bitwise_and(0, 9))
def test_function_math_binary_bitwise_and_list(self):
lst1 = [1, 2, 3]
lst2 = [4, 6, 9]
assert_array_equal(dnp.bitwise_and(lst1, lst2), np.bitwise_and(lst1, lst2))
def test_function_math_binary_bitwise_and_array_with_scalar(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
assert_array_equal(dnp.bitwise_and(dnpa, 1), np.bitwise_and(npa, 1))
# TODO: bitwise_and bug
# assert_array_equal(dnp.bitwise_and(1, dnpa), np.bitwise_and(1, npa))
def test_function_math_binary_bitwise_and_array_with_array(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.bitwise_and(dnpa1, dnpa2), np.bitwise_and(npa1, npa2))
def test_function_math_binary_bitwise_and_array_with_array_param_out(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.bitwise_and(npa1, npa2, out=npa)
dnp.bitwise_and(dnpa1, dnpa2, out=dnpa)
# TODO: dolphindb numpy bitwise_and bug
# assert_array_equal(dnpa.to_numpy(), npa)
def test_function_math_binary_bitwise_and_array_with_series(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
ps = pd.Series([4, 6, 9])
os = orca.Series([4, 6, 9])
assert_series_equal(dnp.bitwise_and(dnpa, os).to_pandas(), np.bitwise_and(npa, ps))
assert_series_equal(dnp.bitwise_and(os, dnpa).to_pandas(), np.bitwise_and(ps, npa))
pser = pd.Series([1, 2, 3])
oser = orca.Series([1, 2, 3])
assert_series_equal(dnp.bitwise_and(os, oser).to_pandas(), np.bitwise_and(ps, pser))
def test_function_math_binary_bitwise_and_array_with_dataframe(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
        pdf = pd.DataFrame({'A': [4, 6, 9]})
import os
from os import listdir
from os.path import isfile, join
import re
from path import Path
import numpy as np
import pandas as pd
from poor_trader import utils
from poor_trader.utils import quotes_range
from poor_trader.config import INDICATORS_OUTPUT_PATH
def _true_range(df_quotes, indices):
cur = df_quotes.iloc[indices[1]]
prev = df_quotes.iloc[indices[0]]
high, low, prev_close = cur.High, cur.Low, prev.Close
a = utils.roundn(high - low, 4)
b = utils.roundn(abs(high - prev_close), 4)
c = utils.roundn(abs(low - prev_close), 4)
return max(a, b, c)
def true_range(df_quotes):
df = pd.DataFrame(index=df_quotes.index)
df['n_index'] = range(len(df_quotes))
_trf = lambda x: _true_range(df_quotes, [int(i) for i in x])
df['true_range'] = df.n_index.rolling(2).apply(_trf)
return df.filter(like='true_range')
def SMA(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_SMA_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
df['SMA'] = df_quotes[field].rolling(period).mean()
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def STDEV(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_STDEV_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
df['STDEV'] = df_quotes[field].rolling(period).std()
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def _ema(i, df_quotes, df_ema, period, field='Close'):
i = [int(_) for _ in i]
prev_ema, price = df_ema.iloc[i[0]], df_quotes.iloc[i[1]]
if pd.isnull(prev_ema.EMA):
return prev_ema.EMA
else:
c = 2. / (period + 1.)
return c * price[field] + (1. - c) * prev_ema.EMA
def EMA(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_EMA_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
c = 2./(period + 1.)
df = pd.DataFrame(columns=['EMA'], index=df_quotes.index)
sma = SMA(df_quotes, period, field)
_sma = sma.dropna()
if len(_sma.index.values) == 0:
print('ts')
df.loc[_sma.index.values[0], 'EMA'] = _sma.SMA.values[0]
for i in range(1, len(df_quotes)):
prev_ema = df.iloc[i-1]
if pd.isnull(prev_ema.EMA): continue
price = df_quotes.iloc[i]
ema_value = c * price[field] + (1. - c) * prev_ema.EMA
df.loc[df_quotes.index.values[i], 'EMA'] = ema_value
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def ATR(df_quotes, period=10, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_ATR_{}.pkl'.format(symbol, quotes_range(df_quotes), period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(columns=['ATR'], index=df_quotes.index)
df_true_range = true_range(df_quotes)
for i in range(1+len(df_quotes)-period):
if pd.isnull(df_true_range.iloc[i].true_range): continue
start = i
end = i+period
last_index = end - 1
trs = df_true_range[start:end]
prev_atr = df.iloc[last_index-1].ATR
if pd.isnull(prev_atr):
atr = np.mean([tr for tr in trs.true_range.values])
else:
atr = (prev_atr * (period-1) + df_true_range.iloc[last_index].true_range) / period
df.loc[df_quotes.index.values[last_index], 'ATR'] = atr
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return utils.round_df(df)
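

# Illustrative usage sketch (assumes a quotes DataFrame with the High/Low/Close
# columns expected by true_range(); symbol=None avoids the pickle cache):
def _example_atr_usage():
    dates = pd.date_range('2020-01-01', periods=30, freq='D')
    close = pd.Series(np.linspace(100, 110, 30), index=dates)
    quotes = pd.DataFrame({'Open': close, 'High': close + 1,
                           'Low': close - 1, 'Close': close}, index=dates)
    return ATR(quotes, period=10)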
def atr_channel(df_quotes, top=7, bottom=3, sma=150, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_atr_channel_{}_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), top, bottom, sma)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df_top_atr = ATR(df_quotes, period=top, symbol=symbol)
df_bottom_atr = ATR(df_quotes, period=bottom, symbol=symbol)
df_sma = SMA(df_quotes, period=sma, symbol=symbol)
df = pd.DataFrame(columns=['top', 'mid', 'bottom'], index=df_quotes.index)
df['mid'] = df_sma.SMA
df['top'] = df.mid + df_top_atr.ATR
df['bottom'] = df.mid - df_bottom_atr.ATR
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def trailing_stops(df_quotes, multiplier=4, period=10, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_trailing_stops_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), period, multiplier)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(columns=['BuyStops', 'SellStops'], index=df_quotes.index)
df_atr = ATR(df_quotes, period=period, symbol=symbol)
sign = -1 # SellStops: -1, BuyStops: 1
for i in range(len(df_quotes)-1):
if pd.isnull(df_atr.iloc[i].ATR): continue
start = i - period
end = i
quotes = df_quotes.iloc[start+1:end+1]
cur_quote = df_quotes.iloc[i]
next_quote = df_quotes.iloc[i + 1]
_atr = df_atr.iloc[i].ATR
# close_price = next_quote.Close
# trend_dir_sign = -1 if close_price > _atr else 1
max_price = quotes.Close.max()
min_price = quotes.Close.min()
sell = max_price + sign * (multiplier * _atr)
buy = min_price + sign * (multiplier * _atr)
sell = [sell, df.iloc[i].SellStops]
buy = [buy, df.iloc[i].BuyStops]
try:
sell = np.max([x for x in sell if not pd.isnull(x)])
buy = np.min([x for x in buy if not pd.isnull(x)])
except:
print(sell)
if sign < 0:
df.set_value(index=df_quotes.index.values[i+1], col='SellStops', value=sell)
if next_quote.Close <= sell:
sign = 1
else:
df.set_value(index=df_quotes.index.values[i+1], col='BuyStops', value=buy)
if next_quote.Close >= buy:
sign = -1
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def donchian_channel(df_quotes, high=50, low=50, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_donchian_channel_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), high, low)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(columns=['high', 'mid', 'low'], index=df_quotes.index)
df['high'] = df_quotes.High.rolling(window=high).max()
df['low'] = df_quotes.Low.rolling(window=low).min()
df['mid'] = (df.high + df.low)/2
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def MACD(df_quotes, fast=12, slow=26, signal=9, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_MACD_{}_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), fast, slow, signal)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
fast_ema = EMA(df_quotes, fast, symbol=symbol)
slow_ema = EMA(df_quotes, slow, symbol=symbol)
df['MACD'] = fast_ema.EMA - slow_ema.EMA
signal_ema = EMA(df, signal, field='MACD', symbol=symbol)
df['Signal'] = signal_ema.EMA
df['MACDCrossoverSignal'] = np.where(np.logical_and(df.MACD > df.Signal, df.MACD.shift(1) <= df.Signal.shift(1)), 1, 0)
df['SignalCrossoverMACD'] = np.where(np.logical_and(df.MACD < df.Signal, df.Signal.shift(1) <= df.MACD.shift(1)), 1, 0)
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
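

# Illustrative usage sketch (only the Close column is required by MACD; the
# arguments reproduce the common 12/26/9 configuration):
def _example_macd_usage():
    dates = pd.date_range('2019-01-01', periods=120, freq='D')
    close = pd.Series(100 + 5 * np.sin(np.arange(120) / 10.0), index=dates)
    quotes = pd.DataFrame({'Close': close}, index=dates)
    return MACD(quotes, fast=12, slow=26, signal=9)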
def SMA_cross(df_quotes, fast=40, slow=60, symbol=None, field='Close'):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_SMA_cross_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), field, fast, slow)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
fast_sma = SMA(df_quotes, fast, field=field, symbol=symbol)
slow_sma = SMA(df_quotes, slow, field=field, symbol=symbol)
df['FastSMA'] = fast_sma.SMA
df['SlowSMA'] = slow_sma.SMA
df['SlowCrossoverFast'] = np.where(np.logical_and(df.FastSMA <= df.SlowSMA, df.FastSMA.shift(1) > df.SlowSMA.shift(1)), 1, 0)
df['FastCrossoverSlow'] = np.where(np.logical_and(df.FastSMA >= df.SlowSMA, df.SlowSMA.shift(1) > df.FastSMA.shift(1)), 1, 0)
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def SLSMA(df_quotes, s_fast=40, s_slow=60, l_fast=100, l_slow=150, field='Close', symbol=None):
if symbol:
        outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_SLSMA_cross_{}_{}_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), field, s_fast, s_slow, l_fast, l_slow)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
# For charting...
df = pd.DataFrame(index=df_quotes.index)
s_fast_sma = SMA(df_quotes, s_fast, field=field, symbol=symbol)
s_slow_sma = SMA(df_quotes, s_slow, field=field, symbol=symbol)
l_fast_sma = SMA(df_quotes, l_fast, field=field, symbol=symbol)
l_slow_sma = SMA(df_quotes, l_slow, field=field, symbol=symbol)
df['S_FastSMA'] = s_fast_sma.SMA
df['S_SlowSMA'] = s_slow_sma.SMA
df['L_FastSMA'] = l_fast_sma.SMA
df['L_SlowSMA'] = l_slow_sma.SMA
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def volume(df_quotes, period=20):
df = pd.DataFrame(index=df_quotes.index)
ema = EMA(df_quotes, period=period, field='Volume')
df['Volume'] = df_quotes.Volume
df['EMA'] = ema.EMA
df = utils.round_df(df)
return df
def trend_strength_indicator(df_quotes, start=40, end=150, step=5, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_trend_strength_indicator_{}_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), start, end, step)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
columns = [x for x in range(start, end, step)]
columns += [end]
for col in columns:
df['SMA{}'.format(col)] = SMA(df_quotes, col, symbol=symbol)
col_size = len(columns)
df_comparison = df.lt(df_quotes.Close, axis=0)
df_comparison['CountSMABelowPrice'] = round(100 * (df_comparison.filter(like='SMA') == True).astype(int).sum(axis=1) / col_size)
df_comparison['CountSMAAbovePrice'] = round(100 * -(df_comparison.filter(like='SMA') == False).astype(int).sum(axis=1) / col_size)
df['TrendStrength'] = df_comparison.CountSMABelowPrice + df_comparison.CountSMAAbovePrice
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def bollinger_band(df_quotes, period=60, stdev=1.2, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_bollinger_band_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), period, stdev)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
    df = pd.DataFrame(index=df_quotes.index)
# coding: utf-8
# Plotting script for TREACTMECH files written by <NAME> - hr0392 at bristol.ac.uk
#
# Run the script within the directory containing the flowdata, flowvector, stress strain, displacement files. Output by default is within same directory.
#
# Displacement gives the corner nodes, everything else gives the centre of the cells.
#
#
#
import pandas as pd
import os
import numpy as np
import matplotlib.dates as mdates
import datetime
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.backends.backend_pdf import PdfPages
import sys
from trexoptions import * #import the option file from within the same folder
cwd = os.getcwd()
def flowdata_import():
"""
Imports the flowdata file from current working directory. Column names are largely preserved. Takes in only the last time step values.
    Returns a dictionary 'flowfaces' that contains the flowdata for each of the default and user specified faces.
"""
flowdata=pd.read_csv(cwd+'/flowdata.tec',sep=r"\s*",skiprows=[0],engine='python')
flowdata_modified= flowdata[flowdata.columns[:-1]]
flowdata_modified.columns = flowdata.columns[1:]
flowdata=flowdata_modified.rename(index=str,columns={'"X(m)"':"X", '"Y(m)"':"Y", '"Z(m)"':"Z", '"P(Pa)"':"Pressure(Pa)", '"T(C)"':"Temperature(C)",
'"SatGas"':"SatGas",'"SatLiq"':"SatLiq",'"X1"':"X1", '"X2"':"X2", '"Pcap(Pa)"':"Pcap", '"DGas_kg/m3"':"DGas_kg_m3",
'"DLiq_kg/m3"':"DLiq_kg_m3", '"Porosity"':"Porosity", '"Perm_X(m2)"':"Perm_X(m2)", '"Perm_Y(m2)"':"Perm_Y(m2)",
'"Perm_Z(m2)"':"Perm_Z(m2)", '"Krel_Gas"':"Krel_Gas", '"Krel_Liq"':"Krel_Liq", '"HGas(J/kg)"':"HGas(J_kg)",
'"HLiq(J/kg)"':"HLiq(J_kg)", '"Cp(J/kg/C)"':"Cp(J_kg_C)", '"TC(W/m/C)"':"TC(W_m_C)", '"DBlk_kg/m3"':"DBlk_kg_m3",
'"Tdif(m2/s)"':"Tdif(m2_s)"})
#Last time step - top, bottom, side walls
val=int(flowdata.loc[flowdata["X"] == 'Zone'][-1:].index[0])#value of the last time zone
lastval=int(flowdata.index[-1])
length=lastval - val #length of last time zone
zone=flowdata[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z) #2D array of the top surface
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)#bottom surface
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)#MaxY face
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)#MinY face
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)#MaxX face
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)#MinX face
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
xsec_z,xsec_z_val=zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]],zone.Z.unique()[int(len(zone.Z.unique())/2)]
flowfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
flowfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
flowfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
if op_xsec_Z_user == True:
for i in list(range(len(xsec_user_zvals))):
xsec_z_user,xsec_z_user_val=zone.loc[zone["Z"] == zone.Z.unique()[xsec_user_zvals[i]]],zone.Z.unique()[xsec_user_zvals[i]]
flowfaces.update({'xsec_z_user_'+str(xsec_user_zvals[i]):xsec_z_user,'xsec_z_user_val'+str(xsec_user_zvals[i]):xsec_z_user_val})
return flowfaces
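
# Illustrative usage sketch (requires flowdata.tec and the trexoptions module in
# the working directory); each entry of the returned dict is a DataFrame:
#
#     flowfaces = flowdata_import()
#     top = flowfaces['Top']
#     print(top[['X', 'Y', 'Temperature(C)']].head())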
def flowvector_import():
"""
Imports the flowvector file from current working directory. Column names are largely preserved. Takes in only the last time step values.
    Returns a dictionary 'vecfaces' that contains the vector data for each of the default and user specified faces.
"""
flowvector=pd.read_csv(cwd+'/flowvector.tec',sep=r"\s*",skiprows=[0],engine='python')
flowvector_modified= flowvector[flowvector.columns[:-1]]
flowvector_modified.columns = flowvector.columns[1:]
flowvector=flowvector_modified.rename(index=str,columns={'"X(m)"':"X", '"Y(m)"':"Y",'"Z(m)"':"Z",
'"FluxLiq"':"FluxLiq", '"FluxLiq_X"':"FluxLiq_X",'"FluxLiq_Y"':"FluxLiq_Y", '"FluxLiq_Z"':"FluxLiq_Z",
'"PorVelLiq"':"PorVelLiq", '"PorVelLiqX"':"PorVelLiqX",'"PorVelLiqY"':"PorVelLiqY", '"PorVelLiqZ"':"PorVelLiqZ",
'"FluxGas"':"FluxGas",'"FluxGas_X"':"FluxGas_X",'"FluxGas_Y"':"FluxGas_Y", '"FluxGas_Z"':"FluxGas_Z",
'"PorVelGas"':"PorVelGas",'"PorVelGasX"':"PorVelGasX",'"PorVelGasY"':"PorVelGasY", '"PorVelGasZ"':"PorVelGasZ",
'"HeatFlux"':"HeatFlux", '"HeatFlux_X"':"HeatFlux_X",'"HeatFlux_Y"':"HeatFlux_Y", '"HeatFlux_Z"':"HeatFlux_Z"})
val=int(flowvector.loc[flowvector["X"] == 'Zone'][-1:].index[0])
lastval=int(flowvector.index[-1])
length=lastval - val
zone=flowvector[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z)
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
xsec_z,xsec_z_val=zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]],zone.Z.unique()[int(len(zone.Z.unique())/2)]
vecfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
vecfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
vecfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
if op_xsec_Z_user == True:
for i in list(range(len(xsec_user_zvals))):
xsec_z_user,xsec_z_user_val=zone.loc[zone["Z"] == zone.Z.unique()[xsec_user_zvals[i]]],zone.Z.unique()[xsec_user_zvals[i]]
vecfaces.update({'xsec_z_user_'+str(xsec_user_zvals[i]):xsec_z_user,'xsec_z_user_val'+str(xsec_user_zvals[i]):xsec_z_user_val})
return vecfaces
def displace_import():
"""
Imports the displacement file from current working directory. Column names are largely preserved. Takes in only the last time step values.
    Returns a dictionary 'dispfaces' that contains the displacement data for each of the default and user specified faces.
    Note: one is added to the xsec user and half values because the displacement output files contain an extra data point.
"""
column_names=["X","Y","Z","Disp_x","Disp_y","Disp_z"]
displace=pd.read_csv(cwd+'/displacement.tec',sep=r"\s+",skiprows=[0,1],usecols=[0,1,2,3,4,5],
names=column_names,engine='python')
val=int(displace.loc[displace["X"] == 'Zone'][-1:].index[0])
lastval=int(displace.index[-1])
length=lastval - val
zone=displace[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z)
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)+1]],zone.Y.unique()[int(len(zone.Y.unique())/2)+1]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)+1]],zone.X.unique()[int(len(zone.X.unique())/2)+1]
xsec_z,xsec_z_val=zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)+1]],zone.Z.unique()[int(len(zone.Z.unique())/2)+1]
dispfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True: #added one to xsec half values as you get an extra datapoint
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]+1]
dispfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]+1]
dispfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
if op_xsec_Z_user == True:
for i in list(range(len(xsec_user_zvals))):
xsec_z_user,xsec_z_user_val=zone.loc[zone["Z"] == zone.Z.unique()[xsec_user_zvals[i]]],zone.Z.unique()[xsec_user_zvals[i]]
dispfaces.update({'xsec_z_user_'+str(xsec_user_zvals[i]):xsec_z_user,'xsec_z_user_val'+str(xsec_user_zvals[i]):xsec_z_user_val})
return dispfaces
def aq_conc_import():
"""
Imports the aq_conc file
"""
aqconcdata=pd.read_csv(cwd+'/aqconc.tec',sep=r"\s*",skiprows=[0],engine='python')
aqconcdata_modified= aqconcdata[aqconcdata.columns[:-1]]
aqconcdata_modified.columns = aqconcdata.columns[1:]
aqconcdata=aqconcdata_modified.rename(index=str,columns=aqconc_name)
#Last time step - top, bottom, side walls
val=int(aqconcdata.loc[aqconcdata["X"] == 'Zone'][-1:].index[0])#value of the last time zone
lastval=int(aqconcdata.index[-1])
length=lastval - val #length of last time zone
zone=aqconcdata[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z) #2D array of the top surface
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)#bottom surface
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)#MaxY face
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)#MinY face
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)#MaxX face
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)#MinX face
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
xsec_z,xsec_z_val=zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]],zone.Z.unique()[int(len(zone.Z.unique())/2)]
aqconcfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
aqconcfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
aqconcfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
if op_xsec_Z_user == True:
for i in list(range(len(xsec_user_zvals))):
xsec_z_user,xsec_z_user_val=zone.loc[zone["Z"] == zone.Z.unique()[xsec_user_zvals[i]]],zone.Z.unique()[xsec_user_zvals[i]]
aqconcfaces.update({'xsec_z_user_'+str(xsec_user_zvals[i]):xsec_z_user,'xsec_z_user_val'+str(xsec_user_zvals[i]):xsec_z_user_val})
return aqconcfaces
def gas_volfrac_import():
"""
    Imports the gas_volfrac file.
    Why the header handling? See https://stackoverflow.com/questions/18039057/python-pandas-error-tokenizing-data
    The 'bad lines' are needed here - specifically the time-step lines, which are 11 values long - and the
    existing file-reading code is reused for simplicity. A cleaner pandas import rule could probably be
    applied to all of the import functions.
    BE CAREFUL: wipe the written-in header in the .tec file if you change anything here, because the function
    writes a new header whenever the gas_volfrac_name dictionary changes. The file is modified in place, so it
    is worth keeping a backup of the original .tec file.
"""
with open(cwd+'/gas_volfrac.tec', 'r') as original: data = original.read()
header=str([i for i in gas_volfrac_name.values()]).strip('[]').replace(',','')
print (header)
print (data[0:len(header)])
if data[0:len(header)]!=header:
with open(cwd+'/gas_volfrac.tec', 'w') as modified: modified.write(header + "\n" + data)
gas_volfracdata=pd.read_csv(cwd+'/gas_volfrac.tec',sep=r"\s*",skiprows=[2],engine='python')
gas_volfracdata=gas_volfracdata.rename(columns=gas_volfrac_name) #fit the column name values with the dictionary
#Last time step - top, bottom, side walls
val=int(gas_volfracdata.loc[gas_volfracdata["X"] == 'Zone'][-1:].index[0])#value of the last time zone
lastval=int(gas_volfracdata.index[-1])
length=lastval - val #length of last time zone
zone=gas_volfracdata[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z) #2D array of the top surface
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)#bottom surface
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)#MaxY face
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)#MinY face
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)#MaxX face
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)#MinX face
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
xsec_z,xsec_z_val=zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]],zone.Z.unique()[int(len(zone.Z.unique())/2)]
gas_volfracfaces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
gas_volfracfaces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
gas_volfracfaces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
if op_xsec_Z_user == True:
for i in list(range(len(xsec_user_zvals))):
xsec_z_user,xsec_z_user_val=zone.loc[zone["Z"] == zone.Z.unique()[xsec_user_zvals[i]]],zone.Z.unique()[xsec_user_zvals[i]]
gas_volfracfaces.update({'xsec_z_user_'+str(xsec_user_zvals[i]):xsec_z_user,'xsec_z_user_val'+str(xsec_user_zvals[i]):xsec_z_user_val})
return gas_volfracfaces
def mineral_ab_import():
"""
Imports the mineral.tec file - mineral Abundances
"""
mineral_ab_data=pd.read_csv(cwd+'/mineral.tec',sep=r"\s*",skiprows=[0],engine='python')
mineral_ab_data_modified= mineral_ab_data[mineral_ab_data.columns[:-1]]
mineral_ab_data_modified.columns = mineral_ab_data.columns[1:]
mineral_ab_data=mineral_ab_data_modified.rename(index=str,columns=min_ab_name)
#Last time step - top, bottom, side walls
val=int(mineral_ab_data.loc[mineral_ab_data["X"] == 'Zone'][-1:].index[0])#value of the last time zone
lastval=int(mineral_ab_data.index[-1])
length=lastval - val #length of last time zone
zone=mineral_ab_data[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z) #2D array of the top surface
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)#bottom surface
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)#MaxY face
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)#MinY face
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)#MaxX face
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)#MinX face
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
xsec_z,xsec_z_val=zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]],zone.Z.unique()[int(len(zone.Z.unique())/2)]
mineral_ab_faces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
mineral_ab_faces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
mineral_ab_faces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
if op_xsec_Z_user == True:
for i in list(range(len(xsec_user_zvals))):
xsec_z_user,xsec_z_user_val=zone.loc[zone["Z"] == zone.Z.unique()[xsec_user_zvals[i]]],zone.Z.unique()[xsec_user_zvals[i]]
mineral_ab_faces.update({'xsec_z_user_'+str(xsec_user_zvals[i]):xsec_z_user,'xsec_z_user_val'+str(xsec_user_zvals[i]):xsec_z_user_val})
return mineral_ab_faces
def mineral_si_import():
"""
Imports the min_SI.tec file - mineral saturation index
"""
    mineral_si_data=pd.read_csv(cwd+'/min_SI.tec',sep=r"\s*",skiprows=[0],engine='python')
mineral_si_data_modified= mineral_si_data[mineral_si_data.columns[:-1]]
mineral_si_data_modified.columns = mineral_si_data.columns[1:]
mineral_si_data=mineral_si_data_modified.rename(index=str,columns=min_si_name)
#Last time step - top, bottom, side walls
val=int(mineral_si_data.loc[mineral_si_data["X"] == 'Zone'][-1:].index[0])#value of the last time zone
lastval=int(mineral_si_data.index[-1])
length=lastval - val #length of last time zone
zone=mineral_si_data[val+1:lastval+1]
zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
top,tpval =zone.loc[zone["Z"] == max(zone.Z) ],max(zone.Z) #2D array of the top surface
bot,btval =zone.loc[zone["Z"] == min(zone.Z) ],min(zone.Z)#bottom surface
MaxY,MxYval=zone.loc[zone["Y"] == max(zone.Y) ],max(zone.Y)#MaxY face
MinY,MnYval=zone.loc[zone["Y"] == min(zone.Y) ],min(zone.Y)#MinY face
MaxX,MxXval=zone.loc[zone["X"] == max(zone.X) ],max(zone.X)#MaxX face
MinX,MnXval=zone.loc[zone["X"] == min(zone.X) ],min(zone.X)#MinX face
xsec_x,xsec_x_val=zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]],zone.Y.unique()[int(len(zone.Y.unique())/2)]
xsec_y,xsec_y_val=zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]],zone.X.unique()[int(len(zone.X.unique())/2)]
xsec_z,xsec_z_val=zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]],zone.Z.unique()[int(len(zone.Z.unique())/2)]
mineral_si_faces={'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
if op_xsec_X_user == True:
for i in list(range(len(xsec_user_xvals))):
xsec_x_user,xsec_x_user_val=zone.loc[zone["Y"] == zone.Y.unique()[xsec_user_xvals[i]]],zone.Y.unique()[xsec_user_xvals[i]]
mineral_si_faces.update({'xsec_x_user_'+str(xsec_user_xvals[i]):xsec_x_user,'xsec_x_user_val'+str(xsec_user_xvals[i]):xsec_x_user_val})
if op_xsec_Y_user == True:
for i in list(range(len(xsec_user_yvals))):
xsec_y_user,xsec_y_user_val=zone.loc[zone["X"] == zone.X.unique()[xsec_user_yvals[i]]],zone.X.unique()[xsec_user_yvals[i]]
mineral_si_faces.update({'xsec_y_user_'+str(xsec_user_yvals[i]):xsec_y_user,'xsec_y_user_val'+str(xsec_user_yvals[i]):xsec_y_user_val})
if op_xsec_Z_user == True:
for i in list(range(len(xsec_user_zvals))):
xsec_z_user,xsec_z_user_val=zone.loc[zone["Z"] == zone.Z.unique()[xsec_user_zvals[i]]],zone.Z.unique()[xsec_user_zvals[i]]
mineral_si_faces.update({'xsec_z_user_'+str(xsec_user_zvals[i]):xsec_z_user,'xsec_z_user_val'+str(xsec_user_zvals[i]):xsec_z_user_val})
return mineral_si_faces
def stress_strain_import():
"""
Imports the stress-strain file from current working directory. Column names are largely preserved. Takes in only the last time step values.
    Returns a dictionary 'stressfaces' that contains the stress_strain data for each of the default and user specified faces.
"""
column_names=["X","Y","Z","Sigma_xx","Sigma_yy","Sigma_zz","Sigma_yz","Sigma_xz","Sigma_xy",
"Strain_xx","Strain_yy","Strain_zz","Strain_yz", "Strain_xz", "Strain_xy","Vol_Strain",
"E_fail_xx", "E_fail_yy", "E_fail_zz","E_fail_yz2","E_fail_xz2","E_fail_xy2","E_fail_vol"]
    stress = pd.read_csv(cwd+'/stress_strain.tec',sep=r"\s+",skiprows=[1],names=column_names,engine='python')
import io
import unittest.mock
import freezegun
import numpy as np
import pandas as pd
import pytest
import functions
def test_git_pull0():
with unittest.mock.patch('git.Repo') as mock_git_repo:
status = functions.git_pull('./.')
mock_git_repo.assert_called_once_with('./.')
assert status is True
def test_git_pull1():
status = functions.git_pull('./.')
assert status is False
def test_read_users0_file_not_found():
with unittest.mock.patch('os.path.isfile', return_value=False) as mocked_os:
with pytest.raises(Exception) as exc:
users = functions.read_users(users_file='test_file')
next(users)
mocked_os.assert_called_once_with('test_file')
assert 'test_file not found!' in str(exc.value)
def test_read_users1():
expected_users_list = ['aaa', 'bbb', 'ccc']
with unittest.mock.patch('os.path.isfile', return_value=True) as mocked_os:
with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data='aaa\nbbb\nccc')) as mocked_open:
gen_users_list = functions.read_users(users_file='test_file')
users_list = [user for user in gen_users_list]
mocked_os.assert_called_once_with('test_file')
mocked_open.assert_called_once_with('test_file', 'r')
assert users_list == expected_users_list
def test_format_dataframe():
file_content = """1,1111111111111,xxxx,0.60,20"""
data_df = pd.read_csv(io.StringIO(file_content), header=None)
data_df.columns = ['nr', 'code', 'desc', 'price', 'stock']
types = {'nr': int, 'code': str, 'desc': str, 'price': float, 'stock': int}
data_df = functions.format_dataframe(data_df=data_df, types=types)
assert data_df['nr'].dtype == np.int64
assert data_df['code'].dtype == np.object
assert data_df['price'].dtype == np.float64
def test_read_csv_file0_file_not_found():
with unittest.mock.patch('os.path.isfile', return_value=False) as mocked_os:
with pytest.raises(Exception) as exc:
functions.read_csv_file(file='test_file', columns=[],
column_types={})
mocked_os.assert_called_once_with('test_file')
assert 'test_file not found!' in str(exc.value)
def test_read_csv_file1():
file_content = """1,1111111111111,xxxx,0.60,20\n2,2222222222222,yyyy,0.80,10"""
df_from_file = pd.read_csv(io.StringIO(file_content), header=None)
columns = ['nr', 'code', 'desc', 'price', 'stock']
expected_df = pd.DataFrame([[1, '1111111111111', 'xxxx', 0.60, 20],
[2, '2222222222222', 'yyyy', 0.80, 10]],
columns=columns)
with unittest.mock.patch('os.path.isfile', return_value=True) as mocked_os:
with unittest.mock.patch('pandas.read_csv', return_value=df_from_file):
prod_df = functions.read_csv_file(file='test_file',
columns=columns,
column_types={'code': str, 'price': float})
mocked_os.assert_called_once_with('test_file')
pd.testing.assert_frame_equal(prod_df, expected_df)
def test_csv_file2_empty_file():
# TODO: write unit test if file is empty
pass
@pytest.mark.parametrize(['input_df', 'expected_set'],
[(pd.DataFrame(['1', '22', '', '4444'], columns=['code']), {1, 2, 4}),
                          (pd.DataFrame(['1', '22', '', '4444', '0000'], columns=['code']),
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
def BestNEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate mean forecast for ensemble of models."""
# id_list = list(ensemble_params['models'].keys())
# does it handle missing models well?
# model_indexes = [x for x in forecasts.keys() if x in id_list]
model_count = len(forecasts.keys())
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
ens_df = ens_df + x
ens_df = ens_df / model_count
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in lower_forecasts.items():
ens_df_lower = ens_df_lower + x
ens_df_lower = ens_df_lower / model_count
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in upper_forecasts.items():
ens_df_upper = ens_df_upper + x
ens_df_upper = ens_df_upper / model_count
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
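

# Illustrative usage sketch (two dummy component forecasts; assumes the
# PredictionObject signature used above):
def _example_best_n_ensemble():
    idx = pd.date_range('2021-01-01', periods=3, freq='D')
    f1 = pd.DataFrame({'series_a': [1.0, 2.0, 3.0]}, index=idx)
    f2 = pd.DataFrame({'series_a': [3.0, 4.0, 5.0]}, index=idx)
    forecasts = {'mod1': f1, 'mod2': f2}
    runtimes = {'mod1': datetime.timedelta(seconds=1),
                'mod2': datetime.timedelta(seconds=2)}
    params = {'model_name': 'BestN', 'models': {'mod1': {}, 'mod2': {}}}
    # lower/upper bounds reuse the point forecasts just to keep the sketch short
    return BestNEnsemble(params, ['mod1', 'mod2'], forecasts, forecasts,
                         forecasts, runtimes, prediction_interval=0.9)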
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
    Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
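

# Illustrative usage sketch: two labeled series and one unlabeled series; the
# classifier assigns a model id to the unlabeled one from its summary stats.
def _example_horizontal_classifier():
    rng = np.random.default_rng(0)
    idx = pd.date_range('2021-01-01', periods=50, freq='D')
    df_train = pd.DataFrame({'a': rng.normal(0, 1, 50),
                             'b': rng.normal(5, 2, 50),
                             'c': rng.normal(0, 1, 50)}, index=idx)
    known = {'a': 'model_1', 'b': 'model_2'}
    return horizontal_classifier(df_train, known)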
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
        available_models (list): models actually available
        full_models (list): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unavailable models or unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
# this is meant to fill in any failures
available_models = list(forecasts.keys())
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
    # reindex returns a new frame, so the result must be assigned back
    forecast_df = forecast_df.reindex(columns=org_idx)
    u_forecast_df = u_forecast_df.reindex(columns=org_idx)
    l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
ens_runtime = datetime.timedelta(0)
for idx, x in forecasts_runtime.items():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df2 = pd.concat([l_forecast_df2, c_fore], axis=1)
forecast_df = pd.concat(
[forecast_df.head(dist_n), forecast_df2.tail(dist_last)], axis=0
)
u_forecast_df = pd.concat(
[u_forecast_df.head(dist_n), u_forecast_df2.tail(dist_last)], axis=0
)
l_forecast_df = pd.concat(
[l_forecast_df.head(dist_n), l_forecast_df2.tail(dist_last)], axis=0
)
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in list(mod_dic.values()):
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def EnsembleForecast(
ensemble_str,
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Return PredictionObject for given ensemble method."""
s3list = ['best3', 'best3horizontal', 'bestn']
if ensemble_params['model_name'].lower().strip() in s3list:
ens_forecast = BestNEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
if ensemble_params['model_name'].lower().strip() == 'dist':
ens_forecast = DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
hlist = ['horizontal', 'probabilistic']
if ensemble_params['model_name'].lower().strip() in hlist:
ens_forecast = HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
if ensemble_params['model_name'].lower().strip() == 'hdist':
ens_forecast = HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
def EnsembleTemplateGenerator(
initial_results, forecast_length: int = 14, ensemble: str = "simple"
):
"""Generate ensemble templates given a table of results."""
    ensemble_templates = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 14:37:43 2020
@author: hamishgibbs
"""
import sys
import __main__ as main
import pandas as pd
import graph_tool as gt
from graph_tool import inference as gti
import numpy as np
#%%
if not hasattr(main, '__file__'):
argv = ['code', '/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/src/analysis/infomap/sbm_example/data/movement_2020_03_2020.csv',
'/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/src/analysis/infomap/sbm_example/processed/sbm.csv']
else:
argv = sys.argv
#%%
mob = pd.read_csv(argv[1], dtype = {'start_quadkey':str, 'end_quadkey':str})
#%%
# drop the island councils (Orkney Islands and Shetland Islands)
mob = mob.loc[[x not in ['Orkney Islands', 'Shetland Islands'] for x in mob['NAME_3']], :]
#%%
def od_df(df):
df = df.loc[:,['start_quadkey', 'end_quadkey', 'n_crisis']].rename(columns = {'start_quadkey':'from', 'end_quadkey':'to', 'n_crisis':'weight'})
return(df)
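# --- Added sketch (illustrative; not part of the original script) ------------
# od_df only selects and relabels the origin/destination/flow columns into a
# from/to/weight layout, presumably for the downstream graph construction.
# Tiny example with made-up quadkeys; extra columns such as NAME_3 are dropped:
def _od_df_example():
    toy = pd.DataFrame({'start_quadkey': ['0311', '0312'],
                        'end_quadkey': ['0312', '0311'],
                        'n_crisis': [10, 7],
                        'NAME_3': ['A', 'B']})
    return od_df(toy)  # columns: from, to, weight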
def extract_nested_blockmodel(state, g, node_id, v_map_i):
levels = state.get_levels()
level_states = {}
for i, level in enumerate(levels):
level_states[i] = []
for v in g.vertices():
tmp = levels[i].get_blocks()[v]
level_states[i].append({'quadkey':v_map_i[node_id[v]], 'cluster':tmp, 'level':i})
level_states[i] = | pd.DataFrame(level_states[i]) | pandas.DataFrame |
# coding: utf-8
"""基于HDF文件的数据库"""
import pandas as pd
import numpy as np
import os
import warnings
from multiprocessing import Lock
from ..utils.datetime_func import Datetime2DateStr, DateStr2Datetime
from ..utils.tool_funcs import ensure_dir_exists
from ..utils.disk_persist_provider import DiskPersistProvider
from .helpers import handle_ids, FIFODict
from pathlib import Path
from FactorLib.utils.tool_funcs import is_non_string_iterable
pd.options.compute.use_numexpr = True
lock = Lock()
warnings.simplefilter('ignore', category=FutureWarning)
def append_along_index(df1, df2):
df1, df2 = df1.align(df2, axis='columns')
new = pd.DataFrame(np.vstack((df1.values, df2.values)),
columns=df1.columns,
index=df1.index.append(df2.index))
new.sort_index(inplace=True)
return new
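# --- Added usage sketch (illustrative; not part of the original module) ------
# append_along_index aligns two frames on their columns, stacks the rows and
# returns the result sorted by index (hypothetical data):
def _append_along_index_example():
    a = pd.DataFrame({'x': [1.0]}, index=pd.to_datetime(['2020-01-02']))
    b = pd.DataFrame({'x': [2.0], 'y': [3.0]}, index=pd.to_datetime(['2020-01-01']))
    # -> two rows ordered 2020-01-01, 2020-01-02; column 'y' is NaN for frame a
    return append_along_index(a, b)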
def auto_increase_keys(_dict, keys):
if _dict:
max_v = max(_dict.values())
else:
max_v = 0
for key in keys:
if key not in _dict:
max_v += 1
_dict[key] = max_v
return _dict
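# --- Added note (illustrative) -----------------------------------------------
# auto_increase_keys hands the next free integer id to every key missing from
# the mapping and leaves existing ids untouched, e.g.
#   auto_increase_keys({'a': 1, 'b': 2}, ['b', 'c', 'd']) -> {'a': 1, 'b': 2, 'c': 3, 'd': 4}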
class H5DB(object):
def __init__(self, data_path, max_cached_files=30):
self.data_path = str(data_path)
self.feather_data_path = os.path.abspath(self.data_path+'/../feather')
self.csv_data_path = os.path.abspath(self.data_path+'/../csv')
self.data_dict = None
self.cached_data = FIFODict(max_cached_files)
self.max_cached_files = max_cached_files
# self._update_info()
def _update_info(self):
factor_list = []
for root, subdirs, files in os.walk(self.data_path):
relpath = "/%s/"%os.path.relpath(root, self.data_path).replace("\\", "/")
for file in files:
if file.endswith(".h5"):
factor_list.append([relpath, file[:-3]])
self.data_dict = pd.DataFrame(
factor_list, columns=['path', 'name'])
def _read_h5file(self, file_path, key):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
data = pd.read_hdf(file_path, key)
except KeyError:
data = pd.read_hdf(file_path, 'data')
finally:
lock.release()
        # update at 2020.02.15: support wide dataframes
columns_mapping = self._read_columns_mapping(file_path)
if not columns_mapping.empty:
data.rename(
columns=pd.Series(columns_mapping.index, index=columns_mapping.to_numpy()),
inplace=True
)
if self.max_cached_files > 0:
self.cached_data[file_path] = data
return data
def _read_columns_mapping(self, file_path):
try:
data = pd.read_hdf(file_path, 'column_name_mapping')
except KeyError:
data = pd.Series()
return data
def _normalize_columns(self, input, column_mapping):
return column_mapping[column_mapping.index.isin(input)].tolist()
def _save_h5file(self, data, file_path, key,
complib='blosc', complevel=9,
mode='w', **kwargs):
try:
lock.acquire()
            # update at 2020.02.15: support wide dataframes
if data.shape[1] > 1000:
columns_mapping = {x:y for x, y in zip(data.columns, range(data.shape[1]))}
data2 = data.rename(columns=columns_mapping)
else:
data2 = data
columns_mapping = {}
with pd.HDFStore(file_path, mode=mode, complevel=complevel,
complib=complib) as f:
f.put(key, data2, **kwargs)
f.put('column_name_mapping', pd.Series(columns_mapping))
if file_path in self.cached_data:
self.cached_data.update({file_path: data})
lock.release()
except Exception as e:
lock.release()
raise e
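    # --- Added sketch (illustrative; not part of the original class) ---------
    # The branch above is a workaround for very wide frames: columns are renamed
    # to integers and the name mapping is stored alongside the data (and undone
    # in _read_h5file), presumably because storing thousands of string column
    # names as HDF5 metadata is expensive. A minimal round-trip of that idea:
    @staticmethod
    def _column_mapping_demo(df):
        """Uncalled demo: rename columns to ints and restore them via a Series."""
        mapping = {c: i for i, c in enumerate(df.columns)}
        narrow = df.rename(columns=mapping)
        inverse = pd.Series({v: k for k, v in mapping.items()})
        return narrow.rename(columns=inverse)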
def _read_pklfile(self, file_path):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
d = | pd.read_pickle(file_path) | pandas.read_pickle |
from typing import List
from thompson_sampling.base import BasePrior
from pandas import isna
# TODO build out functionality to add priors
class BetaPrior(BasePrior):
def __init__(self):
"""
Initializes a prior distribution object
"""
super().__init__()
def _param_calculator(self, mean: float, variance: float, effective_size: int):
"""
Hidden method that creates the beta prior given specifications
"""
if mean >= 1 or mean <= 0:
raise ValueError(f"mean:{mean} must be in (0,1)")
if variance <= 0 or variance >= 0.5 ** 2 or variance >= (mean * (1 - mean)):
raise ValueError(
f"variance: {variance} must be in (0,{round(min([0.25, mean*(1-mean)]), 3)})"
)
if effective_size <= 0:
raise ValueError(f"effective_size: {effective_size} must be greater then 0")
alpha = round((((1 - mean) / variance) - (1 / mean)) * (mean ** 2), 3)
beta = round(alpha * (1 / mean - 1), 3)
ratio = effective_size / (alpha + beta) # effective_size = beta+alpha
return {"a": round(alpha * ratio), "b": round(beta * ratio)}
class GammaPrior(BasePrior):
def __init__(self):
super().__init__()
def _param_calculator(self, mean, variance: None, effective_size: None):
if not | isna(variance) | pandas.isna |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regresssion test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4D')
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2D')
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
with | tm.assertRaisesRegexp(OverflowError, msg) | pandas.util.testing.assertRaisesRegexp |
import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
from scipy.fftpack import fft, ifft, ifftshift
from scipy.optimize import curve_fit
from tqdm import tqdm
tqdm.monitor_interval = 0
import itertools
from collections import deque
import os
class ising2d():
def __init__(self, temperatures, fields, sizes, microstates, output_folder='.', save_states = 0, checkpoint = 100, debug = False):
self.output_folder = output_folder
self.temperatures = temperatures
self.fields = fields
self.sizes = sizes
self.eqbm_window = 50
self.eqbm_zerocount = 20
self.microstates = microstates
self.save_states = save_states
self.saved_states = 0
self.first_save_observables = True
self.first_save_correlations = True
if microstates < checkpoint:
self.checkpoint = microstates
else:
self.checkpoint = checkpoint
self.observables = []
self.debug = debug
if any(np.array(temperatures) < 1):
raise ValueError('The Monte Carlo method cannot be reliably used for T < 1')
if any(np.absolute(fields) > 0.1):
raise ValueError('The Wolff Algorithm performs poorly for B > 0.1')
paths = [output_folder]
if self.save_states > 0:
paths.append(output_folder+'/states')
if self.debug:
paths.append(output_folder+'/correlations')
for path in paths:
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def run(self):
""" generate mictostates for all possible ensembles generated by the lists of size, temperature, and magnetic field """
ensembles = itertools.product(self.sizes, self.temperatures, self.fields)
pbar = tqdm(ensembles, total=len(self.sizes)*len(self.temperatures)*len(self.fields))
for ensemble in pbar:
L = ensemble[0]
T = ensemble[1]
B = ensemble[2]
pbar.set_description('(L,T,B) = ({0}, {1:.3g}, {2:.3g})'.format(L,T,B))
self._update_system(L,T,B)
if self.debug:
self._print_energy_evolution()
self._print_autocorrelation()
for k in tqdm(list(range(self.microstates)), desc = 'Production'):
self._update_microstate()
print_index = k
if print_index > 0:
print_index += 1
self._print_observables(print_index)
if self.saved_states < self.save_states:
self._save_state()
self._print_correlations()
self._print_observables(-1)
self._print_correlations()
##private internal utility functions
def _thermalize(self):
""" Perform enough spin flip operations that the system reaches thermal equilibrium """
self.zerocount = 0
self.thermalsteps = 0
steps = np.maximum(self.N/10, 1000)
while self.zerocount < self.eqbm_zerocount:
self._spinflip(steps, mode='Thermalize')
self.thermalsteps += steps
def _update_system(self, L, T, B):
""" set a new ensemble and equilibrate it """
self.T = T
self.B = B
self.L = L
self.N = L**2
self.state = np.random.choice([-1,1], size=(L,L))
self.state = self.state.astype(np.int64)
self.corrtime = None
self.energy_evolution = None
self.autocorrelation = None
self.delays = None
self.saved_states = 0
self._energy()
self._magnetization()
self._probability()
self._thermalize()
self._correlation_time()
def _update_microstate(self):
""" Flip spins until the energy correlations are gone and an independent configuration is generated """
self._spinflip(5*int(self.corrtime+1), mode = 'Production')
def _correlation_time(self):
""" Flip spins and keep track of energy evolution over time to collect correlation data """
fitted = False
while not fitted:
self._energy_evolution()
self._autocorrelation()
self.delays = np.arange(len(self.autocorrelation))
default = 150.0
p0 = [next((i for i in self.delays if self.autocorrelation[i] < 0), default)/3.0]
popt, pcov = curve_fit(self._exponential, self.delays, self.autocorrelation, p0=p0)
if popt == p0:
self.thermalsteps *= 2
else:
self.corrtime = popt[0]
fitted = True
def _spinflip(self, steps, mode=None):
""" perform a single spin update step using the given algorithm """
self._wolff(steps, mode)
def _wolff(self, steps, mode):
""" perform a spin cluster update step using the Wolff algorithm """
steps = int(steps)
label = mode
if mode == 'Thermalize':
save = False
elif mode == 'Autocorrelation':
save = True
elif mode == 'Production':
save = False
label=None
else:
raise NotImplementedError('{0} is not a valid mode'.format(mode))
if mode=='Thermalize':
self.energy_sign = deque()
if save:
self.energy_evolution = np.zeros(steps)
if label:
iterator = tqdm(list(range(steps)), desc=label)
else:
iterator = list(range(steps))
for k in iterator:
cluster, sign = self._build_cluster(self.probability)
oldE = self.E
if self.B == 0 or (np.sign(self.B) == -sign) or (np.random.rand() < np.exp(-2*sign*self.B*np.sum(cluster))):
self.state[cluster == 1] *= -1
self._energy()
self.M -= 2*np.sum(cluster)*sign
dE = oldE - self.E
if save:
self.energy_evolution[k] = self.E
if mode=='Thermalize':
added = False
if dE != 0:
self.energy_sign.append(np.sign(dE))
added = True
if len(self.energy_sign) > self.eqbm_window:
self.energy_sign.popleft()
if np.sum(self.energy_sign) == 0 and len(self.energy_sign) == self.eqbm_window and added:
self.zerocount += 1
if self.zerocount == self.eqbm_zerocount:
break
def _build_cluster(self, prob, seed=None):
""" build a cluster of like-spin nearest neighbours to be flipped all at once"""
cluster = np.zeros(self.state.shape, dtype=np.int64)
pocket = []
        if seed is None:
seed = np.squeeze(np.random.randint(0, self.L, size=(1,2)).tolist())
sign = self.state[seed[0], seed[1]]
cluster[seed[0], seed[1]] = 1
pocket.append(seed)
pocketnum = 1
index = 0
while index < pocketnum:
i = pocket[index][0]
j = pocket[index][1]
neighbours = [[(i+1)%self.L, j], [(i-1)%self.L, j], [i,(j+1)%self.L], [i, (j-1)%self.L]]
for neighbour in neighbours:
x = neighbour[0]
y = neighbour[1]
if self.state[i,j] == self.state[x,y] and cluster[x,y] != 1 and np.random.rand() < prob:
pocket.append([x,y])
cluster[x,y] = 1
pocketnum += 1
index += 1
return cluster, sign
def _exponential(self, n, n0):
""" return a single exponential function for fitting """
return np.exp(-(n/n0))
def _offset_exponential(self, n, n0, a, p):
""" return a single exponential function for fitting """
return a*np.exp(-(n/n0))/n**p
def _energy(self):
""" Calculate the total energy of the system """
self.E = -np.sum(self.state*(np.roll(self.state, 1, axis=0) + np.roll(self.state, 1, axis=1) + self.B))
def _magnetization(self):
""" Calculate the total magnetization of the system """
self.M = np.sum(self.state)
def _autocorrelation(self):
""" Calculate the autocorrelation of the energy of the system using that fact that the autocorrelation is the Fourier Transform of the PSD """
energy = self.energy_evolution
maxdelay = int(len(energy)/5)
xp = ifftshift((energy - np.average(energy))/np.std(energy))
n = len(xp)
xp = np.r_[xp[:n//2], np.zeros_like(xp), xp[n//2:]]
f = fft(xp)
S = np.absolute(f)**2
R = ifft(S)
self.autocorrelation = (np.real(R)[:n//2]/(np.arange(n//2)[::-1]+n//2))[:maxdelay]
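    # --- Added sketch (illustrative; not part of the original class) ---------
    # _autocorrelation relies on the Wiener-Khinchin theorem: the autocorrelation
    # is the inverse FFT of the power spectral density |FFT(x)|^2. A stripped-down
    # version without the padding/normalisation details used above:
    @staticmethod
    def _autocorrelation_demo(signal):
        """Uncalled demo: normalised autocorrelation of a 1-D signal via FFT."""
        import numpy as np
        x = (signal - np.mean(signal)) / np.std(signal)
        n = len(x)
        f = np.fft.fft(x, n=2 * n)                      # zero-pad to suppress circular wrap-around
        acf = np.real(np.fft.ifft(np.abs(f) ** 2))[:n]
        return acf / acf[0]                             # acf[0] == n, so lag 0 normalises to 1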
def _energy_evolution(self):
""" Flip spins and keep track of energy evolution over time to collect correlation data """
self._spinflip(np.maximum(1000, 2*self.thermalsteps), mode='Autocorrelation')
def _probability(self):
""" pre-define the spin-flip/cluster addition probabilities """
self.probability = 1.0 - np.exp(-2.0/self.T)
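    # --- Added note (illustrative) --------------------------------------------
    # 1 - exp(-2/T) is the Wolff bond-activation probability for the Ising model
    # with J = k_B = 1: parallel neighbours join the cluster with this probability,
    # which is what makes the cluster flip rejection-free at B = 0. Near the
    # critical temperature T_c ~ 2.269 it evaluates to roughly 0.59.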
def _print_observables(self, num):
""" Add a row of observables to the list of saved microstates """
row = {'L': self.L, 'N': self.N, 'T': self.T, 'B': self.B, 'E': self.E, 'M': self.M}
self.observables.append(row)
if num % self.checkpoint == 0:
if self.first_save_observables:
pd.DataFrame(self.observables, index=[0]).to_csv(self.output_folder + '/observables.csv', sep=',', index=False)
self.first_save_observables = False
else:
with open(self.output_folder + '/observables.csv','a') as f:
pd.DataFrame(self.observables).to_csv(f, sep=',', index=False, header=False)
self.observables = []
def _print_correlations(self):
row = {'L': self.L, 'N': self.N, 'T': self.T, 'B': self.B, 'correlation_time': self.corrtime}
if self.first_save_correlations:
| pd.DataFrame(row, index=[0]) | pandas.DataFrame |
import datetime
import json
import csv
import numpy as np
import pandas as pd
# Configura contextos
contextos = [
{"arquivo_comentarios":"../../dados/dadospessoais-comentarios-pdfs-filtrado.csv",
"prefixo_saida":"../site/data/"}
]
#for i in range(1, 7):
# obj = {"arquivo_comentarios" : "../../data/contextos/dadospessoais-comentarios-pdfs-filtrado-contexto{}.csv".format(i),
# "prefixo_saida" : "../../data_web/contexto{}/".format(i)}
# contextos.append(obj)
anteprojeto = pd.read_csv("../../dados/dadospessoais-anteprojeto.csv",
dtype={"commentable_id" : pd.core.common.CategoricalDtype,
"commentable_parent" : pd.core.common.CategoricalDtype,
"commentable_article" : pd.core.common.CategoricalDtype,
"commentable_chapter" : pd.core.common.CategoricalDtype,
"commentable_axis" : pd.core.common.CategoricalDtype,
"commentable_type" : pd.core.common.CategoricalDtype,
"commentable_name" : pd.core.common.CategoricalDtype,
"commentable_text" : np.character})
for contexto in contextos:
comentarios = pd.read_csv(contexto["arquivo_comentarios"],
parse_dates=['comment_date'],
dtype={"source" : pd.core.common.CategoricalDtype,
"comment_id" : pd.core.common.CategoricalDtype,
"author_id" : pd.core.common.CategoricalDtype,
"author_name" : np.character,
"comment_parent" : pd.core.common.CategoricalDtype,
"commentable_id" : pd.core.common.CategoricalDtype,
"comment_text" : np.character })
comentarios.drop(["commentable_name"], axis=1, inplace=True)
comentarios["comment_datetime"] = comentarios["comment_date"]
comentarios["comment_date"] = comentarios["comment_datetime"].apply(lambda a: a.date())
comentarios["comment_month"] = comentarios["comment_datetime"].apply(
lambda a: datetime.datetime.strftime(a, "%b"))
df = | pd.merge(comentarios, anteprojeto, on="commentable_id") | pandas.merge |
import pandas as pd
import pytest
# paso imports
from paso.base import Paso, PasoError
from paso.pre.encoders import Encoders
from loguru import logger
session = Paso(parameters_filepath="../../parameters/lesson.1.yaml").startup()
# 0
def test_Class_init_NoArg():
with pytest.raises(PasoError):
g = Encoders()
# 1
def test_Class_init_WrongScaler():
with pytest.raises(PasoError):
g = Encoders("GORG")
# BoxCoxScaler unit tests
# 2
def test_EncoderList(X):
assert Encoders("BaseNEncoder").encoders() == [
"BackwardDifferenceEncoder",
"BinaryEncoder",
"HashingEncoder",
"HelmertEncoder",
"OneHotEncoder",
"OrdinalEncoder",
"SumEncoder",
"PolynomialEncoder",
"BaseNEncoder",
"LeaveOneOutEncoder",
"TargetEncoder",
"WOEEncoder",
"MEstimateEncoder",
"JamesSteinEncoder",
"CatBoostEncoder",
"EmbeddingVectorEncoder",
]
# 3
def test_bad_encoder_name():
with pytest.raises(PasoError):
g = Encoders("fred")
# 4
def test_BaseNEncoder_no_df(X):
with pytest.raises(PasoError):
Encoders(description_filepath="../../descriptions/pre/encoders/OHE.yaml").train(
[["Male", 1], ["Female", 3], ["Female", 2]]
)
# 5
def test_OrdinaEncoders(X):
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = pd.DataFrame(h)
o = [[1, 1], [2, 3], [2, 2]]
odf = pd.DataFrame(o)
assert (
(
Encoders(description_filepath="../../descriptions/pre/encoders/OHE.yaml")
.train(hdf)
.predict(hdf)
== odf
)
.any()
.any()
)
# 6
def test_OrdinaEncoderFlagsCacheOff(X):
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = pd.DataFrame(h)
g = Encoders("OrdinalEncoder")
g.train(hdf)
assert (g.trained and not g.cache) == True
# 7
def test_OrdinaEncoderFlagsCacheOn(X):
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = pd.DataFrame(h)
g = Encoders("OrdinalEncoder").cacheOn()
g.train(hdf).predict(hdf)
assert (g.trained and g.predicted and g.cache) == True
# 8
def test_OrdinaEncoderFlagsCacheOffpredictedNot(X):
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = pd.DataFrame(h)
g = Encoders("OrdinalEncoder")
g.train(hdf)
assert (g.trained and not g.predicted and not g.cache) == True
# 9
def test_OrdinaEncoderLoadError():
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = pd.DataFrame(h)
g = Encoders("OrdinalEncoder")
g.train(hdf)
with pytest.raises(PasoError):
g.load()
# 10
def test_OrdinaEncoderSaveError():
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = pd.DataFrame(h)
g = Encoders("OrdinalEncoder")
g.train(hdf)
with pytest.raises(PasoError):
g.save()
# 11
def test_OrdinaEncoderFlagsWrite():
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = pd.DataFrame(h, columns=["a", "b"])
g = Encoders("OrdinalEncoder").cacheOn()
g.train(hdf).predict(hdf)
fp: str = "tmp/df"
assert g.write(fp) == g
# 12
def test_OrdinaEncoderFlagsWriteRead():
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = | pd.DataFrame(h, columns=["a", "b"]) | pandas.DataFrame |
import pandas as pd
import io
import lithops
from .utils import derived_from, is_series_like, M
no_default = "__no_default__"
class DataFrame:
def __init__(self, df, filepath, npartitions):
self.filepath = filepath
self.df = df
self.npartitions = npartitions
def reduction(
self,
chunk,
aggregate=None,
combine=None,
meta=no_default,
token=None,
split_every=None,
chunk_kwargs=None,
aggregate_kwargs=None,
combine_kwargs=None,
**kwargs,
):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'count': x.count(), 'sum': x.sum()},
... index=['count', 'sum'])
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
... columns=['count', 'sum'])
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs["aca_chunk"] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs["aca_combine"] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs["aca_aggregate"] = aggregate
return aca(
self,
chunk=_reduction_chunk,
aggregate=_reduction_aggregate,
combine=_reduction_combine,
meta=meta,
token=token,
split_every=split_every,
chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs,
**kwargs,
)
def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
result = self.map_partitions(
method, meta=meta, token=token, skipna=skipna, axis=axis
)
return handle_out(out, result)
else:
result = self.reduction(
method,
meta=meta,
token=token,
skipna=skipna,
axis=axis,
split_every=split_every,
)
if isinstance(self, DataFrame):
result.divisions = (self.columns.min(), self.columns.max())
return handle_out(out, result)
def apply(
self,
func,
axis=0,
broadcast=None,
raw=False,
reduce=None,
args=(),
meta=no_default,
result_type=None,
**kwds,
):
"""Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function to row-wise passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
pandas_kwargs = {"axis": axis, "raw": raw, "result_type": result_type}
if axis == 0:
msg = (
"lithops.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)"
)
raise NotImplementedError(msg)
def pandas_apply_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.apply(func, args=args, **kwds, **pandas_kwargs)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_apply_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False, out=None):
def pandas_all_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.all(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_all_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False, out=None):
def pandas_any_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.any(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_any_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def sum(
self,
axis=None,
skipna=True,
split_every=False,
dtype=None,
out=None,
min_count=None,
):
# use self._reduction_agg()
def pandas_sum_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.sum(axis=axis, skipna=skipna, min_count=min_count)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_sum_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def prod(
self,
axis=None,
skipna=True,
split_every=False,
dtype=None,
out=None,
min_count=None,
):
# use self._reduction_agg()
def pandas_prod_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.prod(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_prod_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False, out=None):
# use self._reduction_agg()
def pandas_max_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.max(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_max_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False, out=None):
# use self._reduction_agg()
def pandas_min_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.min(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_min_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
# use self.map_partition whens axis = 1 , self.reduction when axis = 0()
def pandas_count_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
df.count(axis=axis)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_count_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False, dtype=None, out=None):
# use self.map_partition whens axis = 1 , self.reduction when axis = 0()
def pandas_mean_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = | pd.read_csv(buf) | pandas.read_csv |
# pylint: disable-msg=E1101,E1103
from datetime import datetime
import operator
import numpy as np
from pandas.core.index import Index
import pandas.core.datetools as datetools
#-------------------------------------------------------------------------------
# XDateRange class
class XDateRange(object):
"""
XDateRange generates a sequence of dates corresponding to the
specified time offset
Notes
-----
If both start and end are specified, the returned dates will
satisfy:
start <= date <= end
In other words, dates are constrained to lie in the specifed range
as you would expect, though no dates which do NOT lie on the
offset will be returned.
XDateRange is a generator, use if you do not intend to reuse the
date range, or if you are doing lazy iteration, or if the number
of dates you are generating is very large. If you intend to reuse
the range, use DateRange, which will be the list of dates
generated by XDateRange.
See also
--------
DateRange
"""
_cache = {}
_cacheStart = {}
_cacheEnd = {}
def __init__(self, start=None, end=None, nPeriods=None,
offset=datetools.BDay(), timeRule=None):
if timeRule is not None:
offset = datetools.getOffset(timeRule)
if timeRule is None:
if offset in datetools._offsetNames:
timeRule = datetools._offsetNames[offset]
start = datetools.to_datetime(start)
end = datetools.to_datetime(end)
if start and not offset.onOffset(start):
start = start + offset.__class__(n=1, **offset.kwds)
if end and not offset.onOffset(end):
end = end - offset.__class__(n=1, **offset.kwds)
if nPeriods == None and end < start:
end = None
nPeriods = 0
if end is None:
end = start + (nPeriods - 1) * offset
if start is None:
start = end - (nPeriods - 1) * offset
self.offset = offset
self.timeRule = timeRule
self.start = start
self.end = end
self.nPeriods = nPeriods
def __iter__(self):
offset = self.offset
cur = self.start
if offset._normalizeFirst:
cur = | datetools.normalize_date(cur) | pandas.core.datetools.normalize_date |
# ~*~ coding: utf-8 ~*~
'''
A module with classes and functions for
build a machine learning pipeline
from loading features and labels to
to submitting results to Mlflow Tracking
board
'''
import os, sys
from copy import deepcopy
from itertools import combinations
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.metrics import f1_score, accuracy_score, classification_report
import mlflow
# custom modules
from src.rgutils.data import replace_array_vals, remove_nans, balance_data
from src.rgutils.ml import reduce_dimensions, cross_validation, f_score_macro, scale_features
from src.rgutils.nlp import vectorize_text
class BuildModel:
'''
Create a pipeline to build a model
Implement these steps:
1. Load feature datasets (order matters)
2. Load corresponding labels (order matters)
3. Customize labels: {remap labels, select only target labels}
4. Remove rows from training with missing values or outliers
OR impute data
5. Processing steps as Tf-Idf or PCA
6. Combine features for training or ensemble predictions or build a metalearner
7. Cross-validated training
8. Evaluate on test data: individually for each test set or mixed
9. Submit results to the mlflow tracking board
'''
process_options = ['decomposer','vectorizer','scaler']
subset_options = ['features','concat']
train_data_options = ['concat','ensemble','stack']
def __init__(self, feature_dict, label_list):
# make sure input features and labels match in names
l_names = [n for n,_ in label_list]
for _,features in feature_dict.items():
f_names = [n for n,_ in features]
assert f_names == l_names, f"{f_names} are different from labels {l_names}"
self.features = {}
for name,features in feature_dict.items():
self.features[name] = []
for set_name,path in features:
self.features[name].append([set_name, self._load_data(path)])
self.labels = {name:self._load_data(path) for name,path in label_list}
self.train_params = {}
self._update_general_params()
self.set2index = {name:c for c,(name,_) in enumerate(label_list)}
self.index2set = {v:k for k,v in self.set2index.items()}
self.num_sets = len(self.set2index)
self._check_sizes()
def _update_general_params(self):
''' Get sizes of sets and unique of labels '''
self.train_params.update({f"{set_name}_size": len(set_labels) \
for set_name,set_labels in self.labels.items()})
classes = set()
_ = [classes.update(set(set_labels)) for set_labels in self.labels.values()]
self.train_params.update({
'classes': list(classes),
'features': list(self.features.keys()),
})
@staticmethod
def _load_data(path):
assert os.path.exists(path), f'{path} does not exist'
file_extension = path.split('.')[-1]
if file_extension == 'csv':
data = pd.read_csv(path)
elif file_extension == 'npy':
data = np.load(path, allow_pickle=True)
elif file_extension == 'txt':
with open(path, 'r') as f:
data = np.array(f.read().strip('\n').split('\n'))
else:
raise Exception(f'Format {file_extension} not supported for {path}')
print('Loaded data of shape', data.shape, path)
return data
def _check_sizes(self, subset='features'):
'''
Test if features and labels have equal number of examples
Test if train and test have equal number of features
Test if labels have only 1 dimension
------
Subset argument tells which set of features to test {features, concat}
'''
def x_equals_y(set_name, set_data, name):
''' check if size of X and y equal for each set '''
assert len(set_data) == len(self.labels[set_name]), \
f'{name} {set_name} set size differ from size of labels'
def equal_num_features(features, name):
''' check if sets have equal number of features '''
if len(features) == 0:
raise Exception('No features provided')
elif len(features) == 1:
return
else:
set_names = [n for n,_ in features]
first_index = self.set2index[set_names[0]]
if features[0][1].ndim == 1:
return
for set_name in set_names[1:]:
second_index = self.set2index[set_name]
assert features[first_index][1].shape[1] == features[second_index][1].shape[1], \
(f'{set_names[self.index2set[first_index]]} not equal to '
f'{set_names[self.index2set[second_index]]} features {name}')
assert subset in self.subset_options
if subset == 'features':
for name,features in self.features.items():
for set_name,set_data in features:
x_equals_y(set_name, set_data, name)
equal_num_features(features, name)
elif subset == 'concat':
assert isinstance(self.concat, dict) and len(self.concat) > 0
feature_num = self.concat[self.index2set[0]].shape[1] # feature num
for set_name, set_data in self.concat.items():
x_equals_y(set_name, set_data, 'concat')
assert set_data.shape[1] == feature_num, \
f'Concat {set_name} not equal to feature number {feature_num}'
for name,array in self.labels.items():
assert array.ndim == 1, f'Label {name} does not have 1 dimension'
print('DATA LOOKS GOOD after size checkup')
def relabel_targets(self, mapping=None, include=None):
'''
Relabel target variable
Select only given list of target classes if specified
'''
assert mapping is not None or include is not None
print()
self.old_labels = deepcopy(self.labels)
if mapping:
print("RELABELING TARGETS")
self.labels = {name:replace_array_vals(mapping, lab) for name,lab in self.labels.items()}
if include:
print()
print('SELECTING TARGET CLASSES')
assert len(include) >= 2
is_target_fn = lambda x: x in include
# Go over each set of labels
# get target indices and select those indices
# in labels and respective features
for set_name,labels in self.labels.items():
target_i = np.array(list(map(is_target_fn, labels)))
self.labels[set_name] = labels[target_i]
self.old_labels[set_name] = self.old_labels[set_name][target_i]
index = self.set2index[set_name]
for name,features in self.features.items():
self.features[name][index][1] = features[index][1][target_i]
print(f'Selected {sum(target_i)} rows at {name}')
self._check_sizes()
self._update_general_params()
def process_features(self, process_steps):
'''
Apply processing to input features such as
text vectorization or decomposition of features
-----
Params:
process_steps is a dict with a list of tuples for each feauture
as {feature_name: [(step_name, algorithm, optional: args),...], ...}
in the order provided
'''
print()
print("PROCESSING FEATURES")
p_keys = list(process_steps.keys())
f_keys = list(self.features.keys())
for k in p_keys: assert k in f_keys
for name,step in process_steps.items():
for step_conf in step:
assert step_conf[0] in self.process_options, \
f"Process step {step_conf[0]} has to be among {self.process_options}"
transformed = self._process_step(name, *step_conf)
self.features[name] = [[self.index2set[i], t] \
for i,t in zip(range(self.num_sets), transformed)]
self._check_sizes()
self.train_params.update(process_steps)
def _process_step(self, feature_name, process_type, process_algo, process_args={}):
''' Call text vectorizer or reduce dimensions '''
# order sets inside feature according to how they were passed to label_list
# for all sets, get set_name for index, then index of that set
# then for the feature take the second element of that index
input_data = [self.features[feature_name][self.set2index[self.index2set[i]]][1] \
for i in range(self.num_sets)]
if process_type == 'vectorizer':
_,transformed = vectorize_text(*input_data, vectorizer=process_algo, **process_args)
elif process_type == 'decomposer':
_,transformed = reduce_dimensions(*input_data, decomposer=process_algo, **process_args)
elif process_type == 'scaler':
_,transformed = scale_features(*input_data, scaler=process_algo, **process_args)
return transformed
def concat_features(self):
''' Concatenate sets of features to create one set '''
print()
print('CONCATENATING FEATURES')
self.concat = {}
self.concat_names = []
for c,(name,features) in enumerate(self.features.items()):
for set_name,set_index in self.set2index.items():
set_data = features[set_index][1]
if c == 0:
self.concat.update({set_name: set_data})
else:
self.concat[set_name] = np.concatenate([self.concat[set_name], set_data], axis=1)
# concat feature names
if hasattr(set_data, 'columns'):
f_names = set_data.columns.values
else:
f_names = [f"{name}_{i}" for i in range(set_data.shape[1])]
self.concat_names.extend(f_names)
for set_name,set_data in self.concat.items():
print(f'{set_name} has shape {set_data.shape} after concatenation')
self._check_sizes('concat')
def resample_data(self, sampler, sampling_configs={}, subset='features', ):
'''
Resample data by over-sampling, under-sampling or combining
Use imbalance-learn lib
'''
assert subset in self.subset_options
# resample train set
set_name = self.index2set[0]
set_index = self.set2index[set_name]
if subset == 'concat':
X_train = self.concat[set_name]
y_train = self.labels[set_name]
_,X_train,y_train = balance_data(X_train, y_train, sampler, **sampling_configs)
self.concat[set_name] = deepcopy(X_train)
self.labels[set_name] = deepcopy(y_train)
elif subset == 'ensemble':
for feature_name,features in self.features.items():
X_train = features[set_index][1]
y_train = self.labels[set_name]
_,X_train,y_train = balance_data(X_train, y_train, sampler, **sampling_configs)
self.features[feature_name][set_index][1] = deepcopy(X_train)
if labels:
assert sum(labels != y_train) == 0, f"Different y labels resampled for ensemble"
labels = deepcopy(y_train)
self.labels[set_name] = deepcopy(labels)
self._check_sizes(subset=subset)
self._update_general_params()
self.train_params.update({'sampler': (sampler, sampling_configs, subset)})
def build_models(self, estimators, metric, num_folds, train_data='concat', k_best=3):
''' Train a list of models and return best of them '''
assert train_data in self.train_data_options
mode_fn = lambda x: stats.mode(x).mode.squeeze()
def train_model(estimator):
'''
Cross-validate a model on a set and test it on a holdout set
'''
cv_models,cv_scores = \
cross_validation(X_train, y_train, estimator, num_folds,
metric, model_name='cv', stratified=True)
print(f'Cross-validation scores:')
d = pd.DataFrame(cv_scores)
mean_train,std_train = d["train"].mean(), d["train"].std()
mean_val,std_val = d["val"].mean(), d["val"].std()
print(f'Train: {mean_train:.3f} +- {std_train:.4f}')
print(f'Validation: {mean_val:.3f} +- {std_val:.4f}')
predictions = np.empty((len(y_test), len(cv_models)), dtype='<U20', )
for i,(name,model) in enumerate(cv_models.items()):
predictions[:,i] = model.predict(X_test)
final = np.array(list(map(mode_fn, predictions)))
print()
print('Classification report:')
print(classification_report(y_test, final))
print()
print('Test set confusion matrix:')
print( | pd.crosstab(y_test, final) | pandas.crosstab |
#python 3
import pandas as pd
import csv
import utilities
"""A handful of pandas-based scripts to analyze and edit your spreadsheet-based
collection control data."""
def combine_csvs():
"""Combine two datasets."""
dataset_a = utilities.opencsv()
dataset_b = utilities.opencsv()
#fill in index column or add line for input...or don't have it?
data_a = pd.read_csv(dataset_a, index_col='')
data_b = pd.read_csv(dataset_b, index_col='')
if len(data_a.columns) == len(data_b.columns):
newdataset = data_a.append(data_b)
else:
newdataset = pd.concat([data_a, data_b])
newdataset.to_csv('alldren.csv', encoding='utf-8')
def merge_csvs():
"""Join two spreadsheets on a common column."""
data_a = utilities.opencsv()
data_b = utilities.opencsv()
dataset_a = pd.read_csv(data_a, encoding='utf-8')
dataset_b = pd.read_csv(data_b, encoding='utf-8')
headerlist = dataset_a.columns.values.tolist()
headlist = str(headerlist)
head = headlist[1:-1]
print('Columns: ' + head)
mergevar = input('Enter common column: ')
merged = dataset_a.merge(dataset_b, on=mergevar, how='left')
merged.to_csv('/Users/aliciadetelich/Desktop/agents_w_recs_merged.csv', encoding='utf-8')
# def join_csvs():
# """DO NOT USE"""
# pass
def group_by():
"""Get all values that meet a certain criteria"""
def g(dataset):
columnname = input('In what column is your group located?: ')
groupname = input('What value are you looking for?')
group = dataset.groupby(columnname)
grouped = group.get_group(groupname)
grouped.to_csv('group.csv', encoding='utf-8')
print(grouped)
stit = True
def s():
data = input('Please enter path to input CSV: ')
dataset = pd.read_csv(data)
headerlist = dataset.columns.values.tolist()
headlist = str(headerlist)
head = headlist[1:-1]
print('Columns: ' + head)
c = True
while c:
g(dataset)
while stit:
s()
def all_groups():
"""Get all values that meet a certain criteria."""
data = utilities.opencsv()
dataset = | pd.read_csv(data) | pandas.read_csv |
"""
Auxiliary functions
"""
import warnings
warnings.filterwarnings("ignore")
import os
import json
import joblib
import numpy as np
import pandas as pd
from itertools import permutations, combinations, product
from numba import njit, prange
from tqdm import tqdm
import networkx as nx
import multiprocessing as mp
from math import factorial
from copy import copy
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import sparse
import resource
import csv
import networkx as nx
from networkx.algorithms.distance_measures import diameter
from networkx.algorithms.components import is_weakly_connected, is_strongly_connected, strongly_connected_components
from networkx.algorithms.centrality import degree_centrality, betweenness_centrality
from networkx.convert_matrix import to_numpy_array
from networkx.algorithms.swap import double_edge_swap
from collections import namedtuple
n_combs = lambda n, k: int(factorial(n)/factorial(n-k)/factorial(k))
def read_ecoli_network(path):
f = open(path)
line = f.readline()
while line.startswith('#'):
line = f.readline()
df = pd.read_csv(f, sep="\t", header=None)
df.loc[-1] = line.split("\t")
df.index = df.index + 1
df = df.sort_index()
f.close()
return df
def get_actual_parametrization(source, check_input=True, verbose=False):
cfg = source if type(source) is dict else json.load(open(source, "r"))
if check_input:
assert cfg["NETWORK_TO_SEARCH_IN"] in ["ecoli", "test", "yeast", "ecoli", "gs0.01", "gs0.1", "gs1"]
if verbose:
for param, value in cfg.items():
print(f"{param}: {value}")
return cfg
def update_cfg(path, param, value, verbose=False):
cfg = get_actual_parametrization(path, check_input=False, verbose=False)
cfg[param] = value
cfg = get_actual_parametrization(cfg, verbose=verbose)
json.dump(cfg, open(path, "w"))
return cfg
def get_interaction_matrix(config_file):
cwd = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
network = config_file["NETWORK_TO_SEARCH_IN"]
interaction_matrix = joblib.load(
os.path.join(cwd, "networks", network, f"interaction_matrix.gz")
)
return interaction_matrix
def build_motif_from_string(string):
return np.array(list(map(int, string.split()))).reshape(3, 3)
def get_equivalents(core_pattern):
pattern_variants = []
for permutation in permutations(range(3)):
variant = core_pattern[permutation, :]
variant = variant[:, permutation]
for prev_variant in pattern_variants:
if (variant - prev_variant == np.zeros((3, 3))).all():
break
else:
pattern_variants.append(variant)
return pattern_variants
def print_equivalents(config_file):
m = build_motif_from_string(json.load(open("./motifs_collection.json", "r"))[config_file["MOTIF_TO_SEARCH_FOR"]])
if config_file["SELFLOOPS_INCLUDED"]: m += np.diag([1]*3)
equivalents = get_equivalents(m)
print(f"""Equivalent forms for {config_file["MOTIF_TO_SEARCH_FOR"]}{" with selfloops" if config_file["SELFLOOPS_INCLUDED"] else ""}\
({len(equivalents)} total):""")
for x in equivalents:
print(x)
print()
def get_triad_codes(path=None):
motifs = json.load(open("../motifs_collection.json", "r"))
salt = np.array([2**i for i in range(6)])
mapping = {x: i for i, x in enumerate(motifs.keys())}
codes = {}
for motif in motifs.keys():
form = build_motif_from_string(motifs[motif])
isoforms = get_equivalents(form)
for isoform in isoforms:
mask = np.concatenate([np.diag(isoform, k=i) for i in [-2, -1, 1, 2]])
code = mask @ np.array([2**i for i in range(6)])
codes[code] = mapping[motif]
xcodes = [-1 for _ in range(sum(salt)+1)]
for code, motif in codes.items():
xcodes[code] = motif
xcodes
return xcodes, {i: x for x, i in mapping.items()}
@njit(cache=True)
def get_motifs(interaction_matrix, combs, codes, n):
triads = [[(-1, -1, -1)] for _ in range(n)]
salt = np.array([2**i for i in range(6)]).astype(np.float64)
n_combinations = len(combs)
for i in prange(n_combinations):
c = combs[i]
cl = np.array(c)
triad = interaction_matrix[cl, :][:, cl]
mask = [0]
for k in [-2, -1, 1, 2]:
mask += list(np.diag(triad, k=k))
mask = np.array(mask[1:]).astype(np.float64)
code = int(mask @ salt)
idx = codes[code]
if idx == -1:
pass
else:
triads[idx] += [c]
return triads
def motif_search(config_file, interaction_matrix, batch_size, dump=False, verbose=False):
network_name = config_file["NETWORK_TO_SEARCH_IN"]
codes, mapping = get_triad_codes()
N_CORES = mp.cpu_count() if config_file["N_CORES_TO_USE"] == -1 else config_file["N_CORES_TO_USE"]
def connected_triads_generator(interaction_matrix):
if type(interaction_matrix) == 'scipy.sparse.csr.csr_matrix':
interaction_matrix = sparse.csr_matrix.toarray(interaction_matrix)
interaction_matrix_adj = interaction_matrix - np.diag(np.diag(interaction_matrix))
tg_idxs, tf_idxs = np.where(interaction_matrix_adj != 0)
links = pd.DataFrame(index=range(len(tf_idxs)), columns=["tf", "tg"])
links.tf = tf_idxs
links.tg = tg_idxs
links_tf = links.set_index("tf", drop=False)[["tg"]]
cascades = links.join(links_tf[["tg"]], on="tg", how="inner", rsuffix="_final")
cascades = cascades[cascades.tf != cascades.tg_final]
for cascade in cascades.values:
yield tuple(cascade)
grouper = links.groupby("tg")
counter = grouper["tf"].count()
for tg in counter[counter > 1].index:
tf_pairs = combinations(links[links.tg == tg].tf.values, 2)
for tf_1, tf_2 in tf_pairs:
yield tf_1, tf_2, tg
grouper = links.groupby("tf")
counter = grouper["tg"].count()
for tf in counter[counter > 1].index:
tg_pairs = combinations(links[links.tf == tf].tg.values, 2)
for tg_1, tg_2 in tg_pairs:
yield tf, tg_1, tg_2
triads = connected_triads_generator(interaction_matrix)
def batch_generator(triads):
batch = []
counter = 0
for triad in triads:
batch.append(triad)
counter += 1
if counter == batch_size:
yield batch
batch = []
counter = 0
yield batch
def processor(splitted_triads):
def gen_to_queue(input_q, splitted_triads):
for batch in splitted_triads:
input_q.put(batch)
for _ in range(N_CORES):
input_q.put(None)
def process(input_q, output_q):
while True:
batch = input_q.get()
if batch is None:
output_q.put(None)
break
output_q.put(get_motifs(interaction_matrix, batch, codes, len(mapping)))
input_q = mp.Queue(maxsize = N_CORES * 2)
output_q = mp.Queue(maxsize = N_CORES * 2)
gen_pool = mp.Pool(1, initializer=gen_to_queue, initargs=(input_q, splitted_triads))
pool = mp.Pool(N_CORES, initializer=process, initargs=(input_q, output_q))
finished_workers = 0
while True:
result = output_q.get()
if result is None:
finished_workers += 1
if finished_workers == N_CORES:
break
else:
yield result
input_q = None
output_q = None
gen_pool.close()
gen_pool.join()
pool.close()
pool.join()
splitted_triads = batch_generator(triads)
motifs_generator = processor(splitted_triads)
motifs = [[] for _ in range(len(mapping))]
for batch in tqdm(motifs_generator) if verbose else motifs_generator:
for i in range(len(mapping)):
if batch[i][1:] != []:
for triad in batch[i][1:]:
motifs[i].append("_".join(map(str, sorted(triad))))
motifs = {mapping[i]: list(set(motifs[i])) for i in range(len(mapping))}
counter = {x: len(y) for x, y in motifs.items()}
if dump:
joblib.dump(motifs, f"./networks/{network_name}/motifs.gz")
json.dump(counter, open(f"./networks/{network_name}/counter.json", "w"))
return motifs, counter
def count_triads_nx(interaction_matrix):
G = nx.DiGraph(interaction_matrix.T)
return nx.algorithms.triads.triadic_census(G)
def get_metrics_report(interaction_matrix):
Report = namedtuple(
"report",
["degree_seq", "avg_degree", "diameter_strong", "diameter_weak",
"largest_component_frac", "degree_centrality", "betweenness_centrality"]
)
G = nx.DiGraph(interaction_matrix.T)
degree_seq = pd.Series(np.array([x[1] for x in G.degree]))
avg_degree = degree_seq.mean()
diameter_weak = diameter(G.to_undirected()) if is_weakly_connected(G) else np.inf
if is_strongly_connected(G):
diameter_strong = diameter(G)
largest_component_frac = 1
else:
diameter_strong = np.inf
strong_components = [(c, len(c)) for c in strongly_connected_components(G)]
strong_components = sorted(strong_components, key=lambda x: x[1], reverse=True)
largest_component_frac = strong_components[0][1]/interaction_matrix.shape[0]
dc = pd.Series(degree_centrality(G))
bc = pd.Series(betweenness_centrality(G))
report = Report(*[degree_seq, avg_degree, diameter_strong, diameter_weak, largest_component_frac, dc, bc])
return report
def get_loops(matrix):
m = matrix + matrix.T
x = sorted([sorted([x, y]) for x, y in zip(*np.where(m == 2))])
y = [x[k] for k in range(len(x)) if k % 2 == 0]
return y
@njit
def get_shuffled_matrix(interaction_matrix, nswaps):
shuffled = interaction_matrix.copy()
tf_nodes = np.where(shuffled.sum(axis=0) != 0)[0]
for i in range(nswaps):
tf_1, tf_2 = np.random.choice(tf_nodes, size=2, replace=True)
tg = shuffled[:, np.array([tf_1, tf_2])]
x = np.where((tg[:, 0] == 1) & (tg[:, 1] == 0))[0]
if x.shape[0] > 0:
tg_1 = np.random.choice(x)
else:
continue
y = np.where((tg[:, 1] == 1) & (tg[:, 0] == 0))[0]
if y.shape[0] > 0:
tg_2 = np.random.choice(y)
else:
continue
s = shuffled[np.array([tg_1, tg_2]), :][:, np.array([tf_1, tf_2])]
e1 = np.diag(np.array([1, 1]))
e2 = e1[::-1]
if (s == e1).all():
shuffled[tg_1, tf_1] = 0
shuffled[tg_1, tf_2] = 1
shuffled[tg_2, tf_1] = 1
shuffled[tg_2, tf_2] = 0
else:
shuffled[tg_1, tf_1] = 1
shuffled[tg_1, tf_2] = 0
shuffled[tg_2, tf_1] = 0
shuffled[tg_2, tf_2] = 1
return shuffled
def corruption_score(shuffled_matrix, interaction_matrix):
i, j = np.where(interaction_matrix == 1)
return shuffled_matrix[i, j].sum()/interaction_matrix[i, j].sum()
def plot_distr(counters_shuffled, counter_orig, label, highlight):
df = pd.DataFrame(columns=["motif", "abundance", "network"])
df.motif = counter_orig.keys(); df.abundance = counter_orig.values(); df.network = "original"
for counter_shuffled in tqdm(counters_shuffled):
df2 = pd.DataFrame(columns=["motif", "abundance", "network"])
df2.motif = counter_shuffled.keys(); df2.abundance = counter_shuffled.values(); df2.network = "shuffled"
df = pd.concat([df, df2], axis=0)
df.abundance = df.abundance/1000
fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(20, 5))
fig.suptitle(label, fontsize=30)
for i in range(len(counter_orig.keys())):
motif = list(counter_orig.keys())[i]
b = sns.barplot(data=df[df["motif"]==motif], x="motif", y="abundance", hue="network", ax=ax[i],
palette="Blues_r")
if highlight and motif == highlight:
b.set_facecolor('xkcd:wheat')
b.legend_.remove()
# else:
# plt.setp(b.get_legend().get_texts(), fontsize='13')
# plt.setp(b.get_legend().get_title(), fontsize='13')
b.tick_params("x", labelsize=20)
b.set_xlabel("",fontsize=0)
b.set_ylabel("",fontsize=0);
return df, fig
def get_shuffled_mp(params):
matrix = params["matrix"]
nswaps = params["nswaps"]
return get_shuffled_matrix(matrix, nswaps)
def shuffle_network(matrix, threshold=0.75):
"""
"""
complete = False
swaps = (matrix.sum())*0.2
while not complete:
shuffled_matrix = get_shuffled_matrix(matrix, swaps)
shuffled_score = 1-corruption_score(matrix, shuffled_matrix)
#print(shiffled_score)
swaps += (matrix.sum())*0.2
if shuffled_score >= threshold:
complete = True
return shuffled_matrix
def generate_random_networks(config_file, interaction_matrix, nsims, nsteps, nswaps):
counters = []
for _ in range(nsteps):
pool = mp.Pool(mp.cpu_count())
params = {"matrix": interaction_matrix, "nswaps": nswaps}
shuffled_arrays = pool.map(get_shuffled_mp, (params for _ in range(int(nsims/nsteps))))
pool.close()
pool.join()
for arr in tqdm(shuffled_arrays):
motifs, counter = motif_search(config_file, arr, batch_size=10000)
counters.append(counter)
return counters
def plot_distr_2(counters, counter_orig, ticks):
distr = {triad: [] for triad in counters[0].keys()}
for counter in counters:
for triad, n in counter.items():
distr[triad].append(n)
distr = {x: np.array(y) for x, y in distr.items()}
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 10))
for i, motif in enumerate(counters[0].keys()):
ax = axes[i//3, i%3]
ax.set_title(motif, fontsize=25)
pd.Series(distr[motif]).hist(bins=50, ax=ax)
ax.plot([counter_orig[motif]]*100, np.linspace(0, ticks[i], 100), "r")
def build_zscores_report(counters, counter_orig):
distr = {triad: [] for triad in counters[0].keys()}
for counter in counters:
for triad, n in counter.items():
distr[triad].append(n)
distr = {x: np.array(y) for x, y in distr.items()}
zscores_report = pd.DataFrame(
index=["N_real", "mean(N_rand)", "sd(N_rand)", "Z-score", "P-value"]
)
for motif in counters[0].keys():
n_hypothesis = len(counters[0].keys())
d = distr[motif]
zscore = (counter_orig[motif]-np.mean(distr[motif]))/np.std(distr[motif])
pvalue = len(d[d <= counter_orig[motif]])/len(d)
if pvalue > 0.5:
pvalue = len(d[d >= counter_orig[motif]])/len(d)
if pvalue < 0.01/n_hypothesis:
result = " < 0.01"
elif pvalue < 0.05/n_hypothesis:
result = " < 0.05"
else:
result = "non-significant"
result_list = [
counter_orig[motif],
np.mean(distr[motif]),
np.std(distr[motif]),
zscore,
pvalue
]
zscores_report[motif] = result_list
return zscores_report.T
split_motif = lambda x: list(map(int, x.split("_")))
def build_vmn(motifs, verbose=False):
motifs_network = np.zeros((len(motifs), len(motifs)))
iterator = combinations(range(len(motifs)), 2)
if verbose:
iterator = tqdm(iterator, total=int(len(motifs)*(len(motifs)-1)/2))
for i, j in iterator:
m1, m2 = map(lambda x: set(map(int, x.split("_"))), [motifs[i], motifs[j]])
motifs_network[i, j] = len(m1 & m2)
motifs_network[j, i] = motifs_network[i, j]
return motifs_network
def get_sparcity(matrix):
return matrix.sum()/matrix.shape[0]
def get_tf_content(matrix):
return len(np.where(matrix.sum(axis=0)!=0)[0])/matrix.shape[0]
def plot_motif_distr(counter):
a = pd.Series(counter)
a = a/sum(a)
plt.title("Motifs distribution", fontsize=15)
plt.ylim(0, 1)
plt.bar(a.index, a.values)
for key in a.keys():
plt.text(x=key, y=a[key]+0.05,
s=f"{a[key]:.3f}", fontsize=10)
# plt.savefig("./pics/motif_distr_art2.png")
def read_df_as_network(filename):
with open(filename, 'rt') as f:
network_df = pd.read_csv(f, sep=' ', header = None)
network = nx.from_pandas_edgelist(network_df, source = 0, target = 1)
return network
def out_prob_kernel(out_degree_arr, power_law_degree, random_node):
out_prob = out_degree_arr[random_node]**power_law_degree
out_prob /= sum(out_degree_arr**power_law_degree)
return out_prob
def in_prob_kernel(in_degree_arr, power_law_degree, random_node):
in_prob = in_degree_arr[random_node]**power_law_degree
in_prob /= sum(in_degree_arr**power_law_degree)
return in_prob
def repeats_density(x, f=0.25, a=3):
return (f**(1/(1-a))-1)*f**(-x/(1-a))
def flatten(container):
for i in container:
if isinstance(i, (list,tuple)):
for j in flatten(i):
yield j
else:
yield i
def build_Tnet(edges, n):
"""returns adjacency matrix
requires adjacency list and matrix size
"""
interaction_matrix = np.zeros((n, n))
interaction_matrix[edges[:, 0], edges[:, 1]] = 1
return interaction_matrix
def collect_topological_parameters(config_file, interaction_matrix, label):
"""returns ffl-node participation, sparsity, average in/out-degree
requires adjacency matrix and config file
"""
import statistics
#ffl-part
motifs, counter = motif_search(config_file, interaction_matrix, batch_size=10000)
motifs = motifs["030T"]
ffl_nodes = list(set(sum([list(map(int, x.split("_"))) for x in motifs], [])))
p1 = len(ffl_nodes)/interaction_matrix.shape[0]
p1 = len(motifs)
#sparsity
p2 = interaction_matrix.sum()/interaction_matrix.shape[0]
#in-degree
in_degree = []
for i in range(interaction_matrix.shape[0]):
in_degree.append(interaction_matrix[i:].sum()/interaction_matrix.shape[0])
p3 = sum(in_degree)/len(in_degree)
#p3 = statistics.median(in_degree)
#out-degree
out_degree = []
for i in range(interaction_matrix.shape[0]):
out_degree.append(interaction_matrix[:i].sum()/interaction_matrix.shape[0])
p4 = sum(out_degree)/len(out_degree)
#p4 = statistics.median(out_degree)
params = list(map(lambda ids: round(ids, 3), [p1, p2, p3, p4]))
params.append(label)
return params
def collect_ffl_component(config_file, interaction_matrix):
"""returns ffl-node participation
requires adjacency matrix and config file
"""
import statistics
#ffl-part
motifs, counter = motif_search(config_file, interaction_matrix, batch_size=10000)
motifs = motifs["030T"]
ffl_nodes = list(set(sum([list(map(int, x.split("_"))) for x in motifs], [])))
p1 = len(ffl_nodes)/interaction_matrix.shape[0]
return p1
def get_free_memory():
with open('/proc/meminfo', 'r') as mem:
free_memory = 0
for i in mem:
sline = i.split()
if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
free_memory += int(sline[1])
return (free_memory*1024)/1000000
def get_memory_usage():
return (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss*1024)/1000000
def limit_memory(maxsize):
maxsize = (maxsize*1000000)/1024
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (maxsize, hard))
def analyze_exctracted_network(config_file, path_to_tsv, network_label, network_rep, size, stability_motifs=False):
"""
collect topological stats from extracted networks
"""
import networkx as nx
if network_label == 'randg' or network_label == 'dag':
interaction_matrix = np.array(pd.read_csv(path_to_tsv, header = None, sep=','))
interaction_matrix = np.apply_along_axis(list, 1, interaction_matrix)
interaction_matrix = (interaction_matrix > 0).astype(np.int_)
#print(interaction_matrix)
else:
edges = pd.read_csv(path_to_tsv, sep="\t")
if network_label == 'gnw':
edges.columns = ["tf", "tg"]
#edges.columns = ["tf", "tg", "strength"]
#edges = edges[["tf", "tg"]]
else:
edges.columns = ["tf", "tg"]
edges['tf'].astype(str)
edges['tg'].astype(str)
edges.columns = ["tf", "tg"]
nodes = sorted(np.unique(np.concatenate((edges.tf.unique(), edges.tg.unique()))))
nodes = pd.DataFrame(data=range(len(nodes)), index=nodes, columns=["idx"])
edges_ = edges.join(nodes, on="tf").join(nodes, on="tg", lsuffix="_tf", rsuffix="_tg")
np_edges = edges_[["idx_tg", "idx_tf"]].values
interaction_matrix = build_Tnet(np_edges, len(nodes))
#interaction_matrix = interaction_matrix.T
#if shuffled:
# interaction_matrix = shuffle_network(interaction_matrix)
#print(interaction_matrix)
topological_properties = collect_topological_parameters(config_file,interaction_matrix, network_label)
topological_properties.append(size)
topological_properties.append(network_rep)
if stability_motifs:
#ffl_counts = topological_properties[0]
#graph_nx = nx.DiGraph(interaction_matrix)
#cycles_counts = list(nx.algorithms.cycles.simple_cycles(graph_nx))
#topological_properties = [ffl_counts, cycles_counts]
motifs, counter = motif_search(config_file, interaction_matrix, batch_size=10000)
shuffled_counters = generate_random_networks(config_file, interaction_matrix, 100, 10, 60000)
#topological_properties = counter
#topological_properties = {k:len(v) for k, v in counter.items()}
topological_properties = build_zscores_report(shuffled_counters, counter)
return topological_properties
def create_nx_network(n_trials,sparsity,size,out_dir):
"""
requires number of networks, desired sparsity, desired network size, and output dir
creates a set of networks (adjacency list format)
"""
import numpy as np
import networkx as nx
import random
import os
for number in range(n_trials):
test1 = nx.scale_free_graph(int(size), alpha=0.85, beta=0.1, gamma=0.05, delta_in=0.2, delta_out=0)
edges1 = test1.number_of_edges()
edge_list = list(set([e for e in test1.edges()]))
edge_list = [list(ele) for ele in edge_list]
nx_size = len(list(set([e for l in edge_list for e in l])))
edges = pd.DataFrame(edge_list, columns=['tf', 'tg'])
nodes = sorted(np.unique(np.concatenate((edges.tf.unique(), edges.tg.unique()))))
nodes = pd.DataFrame(data=range(len(nodes)), index=nodes, columns=["idx"])
edges_ = edges.join(nodes, on="tf").join(nodes, on="tg", lsuffix="_tf", rsuffix="_tg")
np_edges = edges_[["idx_tg", "idx_tf"]].values
interaction_matrix = build_Tnet(np_edges, len(nodes))
interaction_matrix = interaction_matrix.T
links_per_node = interaction_matrix.sum()/interaction_matrix.shape[0]
nodes = list(range(0, len(interaction_matrix)))
#sparsity = sparsity+(np.random.uniform(-2,2)*0.1)
while links_per_node<sparsity:
#print(links_per_node)
#print(interaction_matrix)
#calculate in, out degree
out_degree = interaction_matrix.sum(axis=1)
in_degree = interaction_matrix.sum(axis=0)
#calculate probs
in_probs = pd.Series(in_degree/sum(in_degree), nodes)
out_probs = pd.Series(out_degree/sum(out_degree), nodes)
#nodes that create edge
regulator = np.random.choice(out_probs.index, p=out_probs.values)
regulatee = np.random.choice(in_probs.index)
if regulatee == regulator:
while regulatee == regulator:
regulatee = np.random.choice(in_probs.index)
#print([regulator, regulatee])
#add edge
interaction_matrix[regulator,regulatee] = 1
links_per_node = interaction_matrix.sum()/interaction_matrix.shape[0]
#print(links_per_node)
#create adj list
adj_list = []
for name_regulatee, i in enumerate(interaction_matrix.T):
for name_regulator, j in enumerate(interaction_matrix):
if interaction_matrix[name_regulatee][name_regulator] == 1:
adj_list.append([name_regulator, name_regulatee])
#file name
network_name = '_'.join(str(x) for x in ['scale_free_nx',number,'nodes',len(nodes)])
#out dir
if not os.path.exists(out_dir):
os.mkdir(out_dir)
#store adjacency list of nx network:
with open(out_dir+'/'+network_name+'.tsv', "w", newline="") as file:
writer = csv.writer(file, delimiter ='\t')
writer.writerows(adj_list)
return
def analyze_connectivity(path_to_tsv, network_label, network_rep, size):
"""
collect topological stats from extracted networks
"""
import networkx as nx
if network_label == 'randg' or network_label == 'dag':
interaction_matrix = np.array(pd.read_csv(path_to_tsv, header = None, sep=','))
interaction_matrix = np.apply_along_axis(list, 1, interaction_matrix)
interaction_matrix = (interaction_matrix > 0).astype(np.int_)
#print(interaction_matrix)
else:
edges = | pd.read_csv(path_to_tsv, sep="\t") | pandas.read_csv |
from __future__ import print_function
import collections
import os
import sys
import numpy as np
import pandas as pd
try:
from sklearn.impute import SimpleImputer as Imputer
except ImportError:
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
import candle
global_cache = {}
SEED = 2017
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/'
def get_file(url):
return candle.fetch_file(url, 'Pilot1')
def impute_and_scale(df, scaling='std', keepcols=None):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
if keepcols is None:
df = df.dropna(axis=1, how='all')
else:
df = df[keepcols].copy()
all_na_cols = df.columns[df.isna().all()]
df[all_na_cols] = 0
imputer = Imputer(strategy='mean')
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
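# Illustrative sketch of impute_and_scale on a toy frame (the names below are made up
# for demonstration and the lines are left commented so nothing runs on import):
# toy = pd.DataFrame({'a': [1.0, None, 3.0], 'b': [0.5, 0.25, None]})
# impute_and_scale(toy, scaling='minmax')  # NaNs replaced by column means, then scaled to [0, 1]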
def load_dose_response(min_logconc=-4., max_logconc=-4., subsample=None, fraction=False):
"""Load cell line response to different drug compounds, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
min_logconc : -3, -4, -5, -6, -7, optional (default -4)
min log concentration of drug to return cell line growth
max_logconc : -3, -4, -5, -6, -7, optional (default -4)
max log concentration of drug to return cell line growth
subsample: None, 'naive_balancing' (default None)
subsampling strategy to use to balance the data based on growth
fraction: bool (default False)
divide growth percentage by 100
"""
path = get_file(P1B3_URL + 'NCI60_dose_response_with_missing_z5_avg.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
dtype={'NSC': object, 'CELLNAME': str, 'LOG_CONCENTRATION': np.float32, 'GROWTH': np.float32})
global_cache[path] = df
df = df[(df['LOG_CONCENTRATION'] >= min_logconc) & (df['LOG_CONCENTRATION'] <= max_logconc)]
df = df[['NSC', 'CELLNAME', 'GROWTH', 'LOG_CONCENTRATION']]
if subsample and subsample == 'naive_balancing':
df1 = df[df['GROWTH'] <= 0]
df2 = df[(df['GROWTH'] > 0) & (df['GROWTH'] < 50)].sample(frac=0.7, random_state=SEED)
df3 = df[(df['GROWTH'] >= 50) & (df['GROWTH'] <= 100)].sample(frac=0.18, random_state=SEED)
df4 = df[df['GROWTH'] > 100].sample(frac=0.01, random_state=SEED)
df = pd.concat([df1, df2, df3, df4])
if fraction:
df['GROWTH'] /= 100
df = df.set_index(['NSC'])
return df
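# Illustrative sketch (commented out; assumes the P1B3 response file can be fetched
# through get_file, i.e. network access or a populated CANDLE cache):
# growth = load_dose_response(min_logconc=-5., max_logconc=-4., fraction=True)
# growth.head()  # indexed by NSC, with CELLNAME, GROWTH and LOG_CONCENTRATION columns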
def load_combo_response(response_url=None, fraction=False, use_combo_score=False, use_mean_growth=False,
exclude_cells=[], exclude_drugs=[]):
"""Load cell line response to pairs of drugs, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
fraction: bool (default False)
divide growth percentage by 100
use_combo_score: bool (default False)
return combination score in place of percent growth (stored in 'GROWTH' column)
"""
response_url = response_url or (DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
path = get_file(response_url)
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path,
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2', 'PERCENTGROWTH', 'VALID', 'SCORE', 'SCREENER', 'STUDY'],
na_values=['na', '-', ''],
dtype={'NSC1': object, 'NSC2': object, 'CONC1': object, 'CONC2': object, 'PERCENTGROWTH': str, 'SCORE': str},
engine='c', error_bad_lines=False, warn_bad_lines=True)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df = df[['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2', 'PERCENTGROWTH', 'SCORE']]
exclude_cells = [x.split('.')[-1] for x in exclude_cells]
exclude_drugs = [x.split('.')[-1] for x in exclude_drugs]
df = df[~df['CELLNAME'].isin(exclude_cells) & ~df['NSC1'].isin(exclude_drugs) & ~df['NSC2'].isin(exclude_drugs)]
df['PERCENTGROWTH'] = df['PERCENTGROWTH'].astype(np.float32)
df['SCORE'] = df['SCORE'].astype(np.float32)
df['NSC2'] = df['NSC2'].fillna(df['NSC1'])
df['CONC2'] = df['CONC2'].fillna(df['CONC1'])
df['SCORE'] = df['SCORE'].fillna(0)
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['CELLNAME']].to_dict()['CELLNAME']
df['CELLNAME'] = df['CELLNAME'].map(lambda x: cellmap[x])
df_mean_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2']).mean()
df_mean_min = df_mean_min.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
df_mean_min = df_mean_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
df_min = df_mean_min
# df_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
# df_min = df_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
df = df.drop(['CONC1', 'CONC2'], axis=1)
df_max = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).max()
df_max = df_max.add_suffix('_MAX').reset_index() # add SCORE_MAX by flattening the hierarchical index
df_avg = df.copy()
    # rescale percent growth to a 0-100 range, capping values above 100 (the two lower
    # branches of the original conditional were identical, so the x < 0 test is dropped)
    df_avg['PERCENTGROWTH'] = df_avg['PERCENTGROWTH'].apply(lambda x: 100 if x > 100 else 50 + x / 2)
    # group the rescaled copy; the original grouped `df` here, which silently discarded the rescaling above
    df_avg = df_avg.groupby(['CELLNAME', 'NSC1', 'NSC2']).mean()
df_avg = df_avg.add_suffix('_AVG').reset_index()
if use_combo_score:
df = df_max.rename(columns={'SCORE_MAX': 'GROWTH'}).drop('PERCENTGROWTH_MAX', axis=1)
elif use_mean_growth:
df = df_avg.rename(columns={'PERCENTGROWTH_AVG': 'GROWTH'}).drop('SCORE_AVG', axis=1)
else:
df = df_min.rename(columns={'PERCENTGROWTH_MIN': 'GROWTH'}).drop('SCORE_MIN', axis=1)
if fraction:
df['GROWTH'] /= 100
return df
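# Illustrative sketch (commented out; fetches the ALMANAC combo response file via get_file):
# combo = load_combo_response(fraction=True, use_mean_growth=True)
# combo.head()  # one row per (CELLNAME, NSC1, NSC2) with the aggregated GROWTH value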
def load_combo_dose_response(response_url=None, fraction=False, use_combo_score=False, exclude_cells=[], exclude_drugs=[]):
"""Load cell line response to pairs of drugs, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
fraction: bool (default False)
divide growth percentage by 100
use_combo_score: bool (default False)
return combination score in place of percent growth (stored in 'GROWTH' column)
"""
response_url = response_url or (DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
path = get_file(response_url)
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path,
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2', 'PERCENTGROWTH', 'VALID', 'SCORE', 'SCREENER', 'STUDY'],
na_values=['na', '-', ''],
dtype={'NSC1': object, 'NSC2': object, 'CONC1': object, 'CONC2': object, 'PERCENTGROWTH': str, 'SCORE': str},
engine='c', error_bad_lines=False, warn_bad_lines=True,
)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df = df[['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2', 'PERCENTGROWTH', 'SCORE']]
exclude_cells = [x.split('.')[-1] for x in exclude_cells]
exclude_drugs = [x.split('.')[-1] for x in exclude_drugs]
df = df[~df['CELLNAME'].isin(exclude_cells) & ~df['NSC1'].isin(exclude_drugs) & ~df['NSC2'].isin(exclude_drugs)]
df['PERCENTGROWTH'] = df['PERCENTGROWTH'].astype(np.float32)
df['SCORE'] = df['SCORE'].astype(np.float32)
df['NSC2'] = df['NSC2'].fillna(df['NSC1'])
df['CONC2'] = df['CONC2'].fillna(df['CONC1'])
df['SCORE'] = df['SCORE'].fillna(0)
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['CELLNAME']].to_dict()['CELLNAME']
df['CELLNAME'] = df['CELLNAME'].map(lambda x: cellmap[x])
df_mean = df.groupby(['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2']).mean()
df_mean = df_mean.reset_index()
df_mean['CONC1'] = -np.log10(df_mean['CONC1'].astype(np.float32))
df_mean['CONC2'] = -np.log10(df_mean['CONC2'].astype(np.float32))
df = df_mean.rename(columns={'PERCENTGROWTH': 'GROWTH', 'CONC1': 'pCONC1', 'CONC2': 'pCONC2'})
# df_mean_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2', 'CONC1', 'CONC2']).mean()
# df_mean_min = df_mean_min.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
# df_mean_min = df_mean_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
# df_min = df_mean_min
# df_min = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).min()
# df_min = df_min.add_suffix('_MIN').reset_index() # add PERCENTGROWTH_MIN by flattening the hierarchical index
# df = df.drop(['CONC1', 'CONC2'], axis=1)
# df_max = df.groupby(['CELLNAME', 'NSC1', 'NSC2']).max()
# df_max = df_max.add_suffix('_MAX').reset_index() # add SCORE_MAX by flattening the hierarchical index
# if use_combo_score:
# df = df_max.rename(columns={'SCORE_MAX': 'GROWTH'}).drop('PERCENTGROWTH_MAX', axis=1)
# else:
# df = df_min.rename(columns={'PERCENTGROWTH_MIN': 'GROWTH'}).drop('SCORE_MIN', axis=1)
if fraction:
df['GROWTH'] /= 100
return df
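# Illustrative sketch (commented out; uses the same data file as load_combo_response):
# dose = load_combo_dose_response(fraction=True)
# dose.head()  # per-dose rows, with pCONC1/pCONC2 holding -log10 concentrations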
def load_drug_set_descriptors(drug_set='ALMANAC', ncols=None, scaling='std', add_prefix=True):
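    """Load Dragon7 descriptors for a named drug set ('ALMANAC', 'GDSC', 'NCI_IOA_AOA',
    'RTS', or 'pan'), impute and scale them, and return a pandas dataframe keyed by 'Drug'.
    Columns are randomly sub-selected when ncols is given; otherwise they are aligned to
    the reference descriptor columns from load_drug_descriptors.
    """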
if drug_set == 'ALMANAC':
path = get_file(DATA_URL + 'ALMANAC_drug_descriptors_dragon7.txt')
elif drug_set == 'GDSC':
path = get_file(DATA_URL + 'GDSC_PubChemCID_drug_descriptors_dragon7')
elif drug_set == 'NCI_IOA_AOA':
path = get_file(DATA_URL + 'NCI_IOA_AOA_drug_descriptors_dragon7')
elif drug_set == 'RTS':
path = get_file(DATA_URL + 'RTS_drug_descriptors_dragon7')
elif drug_set == 'pan':
path = get_file(DATA_URL + 'pan_drugs_dragon7_descriptors.tsv')
else:
raise Exception('Drug set {} not supported!'.format(drug_set))
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
)
global_cache[path] = df
# df1 = pd.DataFrame(df.loc[:, 'NAME'].astype(int).astype(str))
df1 = pd.DataFrame(df.loc[:, 'NAME'])
# df1['NAME'] = df1['NAME'].map(lambda x: x[4:])
df1.rename(columns={'NAME': 'Drug'}, inplace=True)
    df2 = df.drop('NAME', axis=1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
keepcols = None
else:
train_ref = load_drug_descriptors(add_prefix=add_prefix)
keepcols = train_ref.columns[1:]
df2 = impute_and_scale(df2, scaling, keepcols=keepcols)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
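# Illustrative usage sketch (not part of the original module; assumes the
# descriptor files behind DATA_URL are reachable). The loader above returns a
# 'Drug'-keyed frame of scaled, 'dragon7.'-prefixed descriptors; when ncols is
# not given, non-ALMANAC sets are aligned to the training descriptor columns
# via load_drug_descriptors.
#
#     df_almanac = load_drug_set_descriptors(drug_set='ALMANAC', scaling='std')
#     df_gdsc = load_drug_set_descriptors(drug_set='GDSC')  # columns matched to the training reference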
def load_drug_descriptors_new(ncols=None, scaling='std', add_prefix=True):
"""Load drug descriptor data, sub-select columns of drugs descriptors
randomly if specificed, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
ncols : int or None
        number of columns (drug descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(DATA_URL + 'ALMANAC_drug_descriptors_dragon7.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
)
global_cache[path] = df
# df1 = pd.DataFrame(df.loc[:, 'NAME'].astype(int).astype(str))
df1 = pd.DataFrame(df.loc[:, 'NAME'])
# df1['NAME'] = df1['NAME'].map(lambda x: x[4:])
df1.rename(columns={'NAME': 'Drug'}, inplace=True)
    df2 = df.drop('NAME', axis=1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
def load_drug_descriptors(ncols=None, scaling='std', add_prefix=True):
"""Load drug descriptor data, sub-select columns of drugs descriptors
randomly if specificed, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
ncols : int or None
        number of columns (drug descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
# path = get_file(DATA_URL + 'ALMANAC_drug_descriptors_dragon7.txt')
path = get_file(DATA_URL + 'pan_drugs_dragon7_descriptors.tsv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
)
global_cache[path] = df
# df1 = pd.DataFrame(df.loc[:, 'NAME'].astype(int).astype(str))
df1 = pd.DataFrame(df.loc[:, 'NAME'])
df1['NAME'] = df1['NAME'].map(lambda x: x[4:])
df1.rename(columns={'NAME': 'NSC'}, inplace=True)
    df2 = df.drop('NAME', axis=1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
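# Illustrative usage sketch (assumes the pan_drugs_dragon7_descriptors.tsv file
# behind DATA_URL is reachable). The loader above yields an 'NSC'-keyed frame of
# scaled dragon7 descriptors, which can then be merged onto a response table
# keyed by NSC1/NSC2 (df_response below is a hypothetical placeholder):
#
#     df_desc = load_drug_descriptors(ncols=100, scaling='std')
#     # df = df_response.merge(df_desc.rename(columns={'NSC': 'NSC1'}), on='NSC1', how='left')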
def load_drug_descriptors_old(ncols=None, scaling='std', add_prefix=True):
"""Load drug descriptor data, sub-select columns of drugs descriptors
randomly if specificed, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
ncols : int or None
        number of columns (drug descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'descriptors.2D-NSC.5dose.filtered.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
dtype=np.float32)
global_cache[path] = df
df1 = pd.DataFrame(df.loc[:, 'NAME'].astype(int).astype(str))
df1.rename(columns={'NAME': 'NSC'}, inplace=True)
    df2 = df.drop('NAME', axis=1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
def load_drug_smiles():
path = get_file(DATA_URL + 'ChemStructures_Consistent.smiles')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c', dtype={'nsc_id': object})
df = df.rename(columns={'nsc_id': 'NSC'})
global_cache[path] = df
return df
def load_sample_rnaseq(ncols=None, scaling='std', add_prefix=True, use_landmark_genes=False, preprocess_rnaseq=None, sample_set='NCI60'):
if use_landmark_genes:
filename = 'combined_rnaseq_data_lincs1000'
else:
filename = 'combined_rnaseq_data'
if preprocess_rnaseq and preprocess_rnaseq != 'none':
scaling = None
filename += ('_' + preprocess_rnaseq) # 'source_scale' or 'combat'
path = get_file(DATA_URL + filename)
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c')
global_cache[path] = df
if sample_set == 'RTS':
df_ids = pd.read_table(get_file(DATA_URL + 'RTS_PDM_samples'))
df = df.merge(df_ids, on='Sample').reset_index(drop=True)
else:
df = df[df['Sample'].str.startswith(sample_set)].reset_index(drop=True)
# cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.new.txt')
# df_cellmap = pd.read_csv(cellmap_path, sep='\t')
# df_cellmap.set_index('NCI60.ID', inplace=True)
# cellmap = df_cellmap[['CELLNAME']].to_dict()['CELLNAME']
# df['Sample'] = df['Sample'].map(lambda x: cellmap[x])
# df = df.rename(columns={'Sample': 'CELLNAME'})
df1 = df['Sample']
    df2 = df.drop('Sample', axis=1)
if add_prefix:
df2 = df2.add_prefix('rnaseq.')
    total = df2.shape[1]  # count feature columns only; df still includes the 'Sample' key column
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
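# Illustrative usage sketch: load_sample_rnaseq keeps a 'Sample' key column plus
# 'rnaseq.'-prefixed features; passing preprocess_rnaseq (e.g. 'source_scale' or
# 'combat') switches to a pre-normalized file and disables the local scaling.
# Assumes the combined_rnaseq_data* files are reachable via DATA_URL.
#
#     df_expr = load_sample_rnaseq(use_landmark_genes=True,
#                                  preprocess_rnaseq='combat',
#                                  sample_set='NCI60')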
def load_cell_expression_rnaseq(ncols=None, scaling='std', add_prefix=True, use_landmark_genes=False, preprocess_rnaseq=None):
if use_landmark_genes:
filename = 'combined_rnaseq_data_lincs1000'
else:
filename = 'combined_rnaseq_data'
if preprocess_rnaseq and preprocess_rnaseq != 'none':
scaling = None
filename += ('_' + preprocess_rnaseq) # 'source_scale' or 'combat'
path = get_file(DATA_URL + filename)
df = global_cache.get(path)
if df is None:
        df = pd.read_csv(path, sep='\t', engine='c')
import math
import queue
from datetime import datetime, timedelta, timezone
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver, \
DataframeSource
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
lst.append(x)
return lst
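# Reduce expects its folding function to return the accumulator, but list.append
# returns None; this helper appends and hands the list back so the accumulated
# results can be threaded through the flow and collected at termination.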
def test_sliding_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
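# How the expected values above arise (worked example): events arrive every
# 25 minutes with values 0..9. By the fifth event (col1=4, t=+100m) only the
# events at +50m, +75m and +100m fall inside the trailing 1h window, giving
# number_of_stuff_sum_1h = 2 + 3 + 4 = 9 and avg_1h = 3.0, while the 2h and 24h
# windows still cover all five events (sum 10, avg 2.0). The '10m' argument of
# SlidingWindows is the bucket granularity, not an additional window span.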
def test_sliding_window_sparse_data():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data_uneven_feature_occurrence():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'col1': 0}, 'tal', test_base_time)
for i in range(10):
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 2, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2, 'number_of_stuff_sum_24h': 2,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 4, 'number_of_stuff_sum_2h': 4, 'number_of_stuff_sum_24h': 4,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12, 'number_of_stuff_sum_24h': 12,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 16, 'number_of_stuff_sum_2h': 16, 'number_of_stuff_sum_24h': 16,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 8, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 20,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 25, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 25,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_filters_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'),
aggr_filter=lambda element: element['is_valid'] == 0)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'is_valid': i % 2}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'is_valid': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'is_valid': 1, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 2, 'is_valid': 0, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'is_valid': 1, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 4, 'is_valid': 0, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'is_valid': 1, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 6, 'is_valid': 0, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'is_valid': 1, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 8, 'is_valid': 0, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'is_valid': 1, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_max_values_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
SlidingWindows(['24h'], '1h'),
max_value=5)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=10 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1},
{'col1': 1, 'num_hours_with_stuff_in_the_last_24h_count_24h': 2},
{'col1': 2, 'num_hours_with_stuff_in_the_last_24h_count_24h': 3},
{'col1': 3, 'num_hours_with_stuff_in_the_last_24h_count_24h': 4},
{'col1': 4, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 5, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 6, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 7, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 8, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 9, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_simple_aggregation_flow_multiple_fields():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_things", "col2", ["count"],
SlidingWindows(['1h', '2h'], '15m')),
FieldAggregator("abc", "col3", ["sum"],
SlidingWindows(['24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'col2': 0.0, 'col3': 4, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_things_count_1h': 1, 'number_of_things_count_2h': 1,
'abc_sum_24h': 4, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'col2': 1.2, 'col3': 6, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_sum_24h': 1, 'number_of_things_count_1h': 2, 'number_of_things_count_2h': 2,
'abc_sum_24h': 10, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'col2': 2.4, 'col3': 8, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_sum_24h': 3, 'number_of_things_count_1h': 3, 'number_of_things_count_2h': 3,
'abc_sum_24h': 18, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'col2': 3.5999999999999996, 'col3': 10, 'number_of_stuff_sum_1h': 6,
'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_things_count_1h': 4,
'number_of_things_count_2h': 4, 'abc_sum_24h': 28, 'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.5,
'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'col2': 4.8, 'col3': 12, 'number_of_stuff_sum_1h': 10, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_sum_24h': 10, 'number_of_things_count_1h': 5, 'number_of_things_count_2h': 5,
'abc_sum_24h': 40, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'col2': 6.0, 'col3': 14, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_sum_24h': 15, 'number_of_things_count_1h': 6, 'number_of_things_count_2h': 6,
'abc_sum_24h': 54, 'number_of_stuff_avg_1h': 2.5, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'col2': 7.199999999999999, 'col3': 16, 'number_of_stuff_sum_1h': 21,
'number_of_stuff_sum_2h': 21, 'number_of_stuff_sum_24h': 21, 'number_of_things_count_1h': 7,
'number_of_things_count_2h': 7, 'abc_sum_24h': 70, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'col2': 8.4, 'col3': 18, 'number_of_stuff_sum_1h': 28, 'number_of_stuff_sum_2h': 28,
'number_of_stuff_sum_24h': 28, 'number_of_things_count_1h': 8, 'number_of_things_count_2h': 8,
'abc_sum_24h': 88, 'number_of_stuff_avg_1h': 3.5, 'number_of_stuff_avg_2h': 3.5, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'col2': 9.6, 'col3': 20, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36,
'number_of_stuff_sum_24h': 36, 'number_of_things_count_1h': 9, 'number_of_things_count_2h': 9,
'abc_sum_24h': 108, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'col2': 10.799999999999999, 'col3': 22, 'number_of_stuff_sum_1h': 45,
'number_of_stuff_sum_2h': 45, 'number_of_stuff_sum_24h': 45,
'number_of_things_count_1h': 10, 'number_of_things_count_2h': 10, 'abc_sum_24h': 130,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 4.5, 'number_of_stuff_avg_24h': 4.5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
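# Unlike the sliding windows above, FixedWindows buckets reset at fixed
# boundaries, which is why the counts drop back down mid-sequence: with events
# every 25 minutes starting at 21:40 UTC, the expected 1h count restarts at the
# top of each hour (col1=4 lands at 23:20, the first event of its hour, so
# count_1h is 1 again) and the 24h count restarts after midnight (col1=6 -> 1).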
def test_fixed_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['15m', '25m', '45m', '1h']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 7.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 8.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 9.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 10.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 11.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 6.0, 'samples_count_45m': 1.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 7.0, 'samples_count_45m': 2.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 8.0, 'samples_count_45m': 3.0, 'samples_count_1h': 8.0,
                 'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'),
import requests,time,os,pickle,inspect,json
import numpy as np
import pandas as pd
from math import ceil
from time import sleep
from pathlib import Path
from random import random
from datetime import datetime as dt
from Datorama import Connect,Workspace,Bad_HTTP_Response,Unequal_Input_Error,Timer
class datorama():
    '''
    Main class representing the parent Datorama object.
    Inputs:
        api_token (str)
            The api token provided by Datorama.
        verbose (boolean)
            Determines the amount of feedback to be returned to the user. Useful for debugging.
        pause (int,float)
            The length of time to sleep between standard iterated requests.
        platform_rate_pause (int,float)
            Passed through to the Connect helper as its platform rate-limit pause.
        restore_spaces (boolean)
            Whether to rebuild workspaces from the local restoration cache instead of calling the API.
        restore_streams (boolean)
            Whether to also restore stream metadata for each restored workspace.
        restore_jobs (boolean)
            Whether to also restore job metadata for each restored stream.
    '''
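    # Illustrative usage sketch (assumes a valid Datorama API token; the token
    # string below is a placeholder). restore_spaces=False forces a fresh pull
    # of workspace metadata instead of reading the local restoration cache:
    #
    #     dato = datorama('my-api-token', verbose=True, restore_spaces=False)
    #     dato.get_all_streams()
    #     dato.create_stream_df(export=True, export_name='streams.csv')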
def __init__(self,api_token,verbose=False,pause=.5,platform_rate_pause=30,restore_spaces=True,restore_streams=True,restore_jobs=False):
self.instanceId = int(dt.now().timestamp()*1000)
self.restore_path = Path(os.path.dirname(inspect.getsourcefile(datorama) ) )/'restoration/workspaces'
self.log_path = Path(os.path.dirname(inspect.getsourcefile(datorama) ) )/'restoration/logs'
self.check_backup_folders()
self.logs = {'job_log':{},'update_log':[],'error_log':{},'maintenance':{} }
self.connection = Connect(
datorama=self,api_token=api_token,verbose=verbose,pause=pause,platform_rate_pause=platform_rate_pause
)
self.workspaces,self.streams,self.jobs = {},{},{}
if restore_spaces:
self.restore_spaces(restore_streams=restore_streams,restore_jobs=restore_jobs)
else:
self.get_workspaces()
def check_backup_folders(self):
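        '''Create the restoration (workspaces) and log folders under the installed package directory if they do not already exist.'''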
if not os.path.exists(self.restore_path):
if not os.path.exists(os.path.dirname(self.restore_path)):
os.mkdir(os.path.dirname(self.restore_path) )
os.mkdir(self.restore_path)
if not os.path.exists(self.log_path):
os.mkdir(self.log_path)
def restore_spaces(self,restore_streams,restore_jobs):
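        '''Rebuild Workspace objects (and optionally their streams and jobs) from the locally cached spaces.json instead of calling the API.'''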
with open(self.restore_path/'spaces.json','r') as f:
content = json.load(f)
self.workspaces = {ws.get('id'):Workspace(self,attributes=ws) for ws in content}
if restore_streams:
for space in self.workspaces.values(): space.restore_streams()
if restore_jobs:
for stream in self.streams.values(): stream.restore_jobs()
def log_error(self,source_module,function_triggered,error_raised,detail):
        '''Adds a record to the error log.'''
self.logs['error_log'].update(
{ (len(self.logs['error_log'])+1):{'module':source_module,'function':function_triggered,'timestamp':str(dt.now() ),'error_raised':error_raised,'detail':str(detail) } }
)
with open(self.log_path/f'{self.instanceId}_error_log.json','w') as f:
json.dump(self.logs['error_log'],f)
def log_update(self,action,obj,detail=None):
'''Adds a record to the maintenance log.'''
self.logs['maintenance'].update(
{ (len(self.logs['maintenance'])+1):{'action':action,'object':obj,'detail':detail,'timestamp':str(dt.now() )} }
)
def log_job(self,workspace,stream,job,job_type,start,end,isError=False):
'''Add a record to the job log.'''
status = 'queued'
if isError:
status = 'error'
if workspace not in self.logs['job_log']: self.logs['job_log'][workspace] = {}
if stream not in self.logs['job_log'][workspace]: self.logs['job_log'][workspace][stream] = {}
self.logs['job_log'][workspace][stream].update(
{job:{
'workspace':workspace,'stream':stream,'job':job,
'job_type':job_type,'start':start,'end':end,'status':status,
'exec_start':str( dt.now() ),'exec_end':'nat'
}
}
)
with open(self.log_path/f'{self.instanceId}_job_log.json','w') as f:
json.dump(self.logs['job_log'],f)
def get_workspaces(self):
'''Get request to pull the metadata for all workspaces from the api.'''
try:
if self.connection.verbose:
print('getting workspaces')
self.get_workspaces_response = self.connection.call(method='GET',endpoint='/v1/workspaces')
self.ws_content = self.get_workspaces_response.json()
self.workspaces = {ws.get('id'):Workspace(self,attributes=ws) for ws in self.ws_content}
self.store_spaces()
except Exception as X:
self.log_error(source_module='datorama',function_triggered='get_workspace',error_raised=str(X),detail='No valid response')
def store_spaces(self):
'''Persist the workspace metadata to the restoration file, retrying briefly on write failures.'''
retries = 0;saved = False
while not saved and retries < 10:
try:
with open(self.restore_path/'spaces.json','w') as f:
json.dump(self.ws_content,f)
saved = True
except:
retries += 1
sleep(random() )
if retries == 10:
raise RuntimeError('Failed to write spaces.json after 10 attempts')
def get_all_dimensions(self):
'''Loop through all workspaces and retrieve the dimensions.'''
print('- getting dimensions for all workspaces -')
print('\tmaking calls to api')
self.dimensions = {}
cnt = len(self.workspaces)
rtimer = Timer(cnt)
for idx,space in enumerate(self.workspaces):
self.workspaces.get(space).get_dimensions()
self.dimensions.update( {space:self.workspaces.get(space).dimensions} )
rtimer.update(idx)
print('\tdone')
def create_stream_df(self,export=False,export_name='Datorama Stream Meta Data.csv',fields=None):
'''
Creates a pandas data frame from the stream data.
Inputs:
export (boolean)
Whether to export the resulting data frame to a csv file.
export_name (str)
The output filename with extension. Must be a csv file.
'''
if not self.streams:
self.get_all_streams()
if not fields:
fields = [
'id','name','dataSourceId','sourceDisplayName','workspaceId',
'enabled','hasData','dataSourceAuthenticationId','createTime',
'lastUpdate','lastRunStatus','lastRowsRetrieved','processedRows',
'lastDataDate'
]
print('- creating stream data frame -')
self.stream_meta = []
for stream in self.streams.values():
self.stream_meta.append( {x:stream.__dict__.get(x) for x in stream.__dict__ if x in fields} )
self.stream_df = pd.DataFrame(self.stream_meta).reset_index()
self.stream_df['createTime'] = pd.to_datetime(self.stream_df['createTime'],unit='ms')
self.stream_df['lastUpdate'] = pd.to_datetime(self.stream_df['lastUpdate'],unit='ms')
if export:
self.stream_df.to_csv(export_name,index=False)
print('- done -')
def get_all_streams(self,workspaces=None):
'''Loops through all workspace objects (or a supplied subset) and triggers each one's 'get_streams' function.'''
print('- getting metadata for all streams -')
if not workspaces:
workspaces = self.workspaces
else:
workspaces = {k:v for k,v in self.workspaces.items() if v.id in workspaces}
print('\tmaking calls to api')
cnt = len(workspaces)
rtimer = Timer(cnt)
for idx,space in enumerate(workspaces):
self.workspaces.get(space).get_streams()
rtimer.update(idx)
print('- done -')
def create_jobs_df(self,export=False,export_name='Datorama Job Run Data.csv'):
'''
Creates a pandas data frame from the jobs data.
Inputs:
export (boolean)
Whether to export the resulting data frame to a csv file.
export_name (str)
The output filename with extension. Must be a csv file.
'''
if not self.jobs:
self.get_all_jobs()
print('- creating job data frame -')
exclusions = ['connection','logs','log_error','log_job']
job_meta = []
for stream in self.jobs.values():
for job in stream.values():
job_meta.append( {x:job.__dict__.get(x) for x in job.__dict__ if x not in exclusions} )
self.jobs_df = pd.DataFrame(job_meta).reset_index()
self.jobs_df['startExecutionTime'] = pd.to_datetime(self.jobs_df['startExecutionTime'],unit='ms')
import time
import random
from bs4 import BeautifulSoup
from selenium import webdriver as wd
from IPython.display import display
import pandas as pd
# from selenium.webdriver.chrome.options import Options
# from webdriver_manager.chrome import ChromeDriverManager
# driver = wd.Chrome(ChromeDriverManager().install())
# wd.implicitly_wait(60)
URL = "https://www.bestbuy.com/site/searchpage.jsp?id=pcat17071&qp=gpusv_facet%3DGraphics%20Processing%20Unit%20(GPU)~NVIDIA%20GeForce%20RTX%203060%5Egpusv_facet%3DGraphics%20Processing%20Unit%20(GPU)~NVIDIA%20GeForce%20RTX%203060%20Ti%5Egpusv_facet%3DGraphics%20Processing%20Unit%20(GPU)~NVIDIA%20GeForce%20RTX%203070&sp=%2Bcurrentprice%20skuidsaas&st=graphics+card"
options = wd.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--start-maximized")
options.add_argument("window-size=1920,1080")
wd = wd.Chrome(options=options)
wd.get(URL)
# wd.find_element_by_xpath()
wd.save_screenshot("screenshot.png")
soup = BeautifulSoup(wd.page_source, features="html.parser")
lists = soup.find("ol", {"class": "sku-item-list"})
# print(lists);
items = lists.findAll("li", {"class": "sku-item"})
rows_processed = []
for item in items:
title = item.find("h4", {"class": "sku-header"})
button = item.find("div", {"class": "sku-list-item-button"})
row = []
row.append(title.text)
row.append(button.text)
print(title.text)
print(button.text)
rows_processed.append(row)
pd.set_option("display.max_colwidth", None)  # None disables truncation; -1 is deprecated in pandas >= 1.0
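# A minimal sketch of tabulating the scraped rows; the column names below are assumptions
# for illustration, not taken from the original script.
df_scrape = pd.DataFrame(rows_processed, columns=["title", "availability"])
display(df_scrape)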
#PCA Analysis Python Code
from mpi4py import MPI #MPI package for cluster analysis
import pandas as pd
import datetime
import numpy as np
from sklearn.decomposition import PCA #PCA Package
import os
import zipfile #read the csv files directly
CONST_INTERVAL=5 #interval in seconds
CONST_BEGINTIME='9:30:00'
CONST_ENDTIME='16:00:00'
#get the mpi object
comm=MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
def expand_gap(x):
#function to expand gaps
max_lim=x['genjud_incre'].iloc[0]+1
incre_list=list(range(0,max_lim))
incre_arr=np.array(incre_list).reshape(max_lim, 1)
tmpdf=pd.DataFrame(incre_arr, columns=["increment"])
res_df=x.merge(tmpdf,on="increment",how="outer")
res_df=res_df.sort_values(['increment']) #sort before ffill
res_df=res_df.fillna(method='ffill').fillna(method='bfill')
return res_df
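# Illustrative behaviour of expand_gap: for a group observed only at increments 0 and 2 with
# genjud_incre == 3, the outer merge above inserts empty rows for increments 1 and 3, and the
# ffill pass copies the last observed quote forward into those gaps (bfill covers a missing
# increment 0 at the start).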
def calculate_return(x):
#function used to calculate the returns
x['returns']=(x['MIDPRICE']-x['MIDPRICE'].shift(1))/x['MIDPRICE'].shift(1)
return x
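# Worked example (illustrative only): MIDPRICE values [100.0, 101.0, 99.99] give
# returns [NaN, 0.01, -0.01], i.e. (p_t - p_{t-1}) / p_{t-1} for each sampling interval.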
def pca_analysis(name):
begin=str(datetime.datetime.now())
name_date=''.join(filter(str.isdigit, name)) #the date indicated on the trade and quote file names
#read the zip file and convert it to csv
with zipfile.ZipFile(name) as zip:
csv_name=name.replace("zip","csv")
with zip.open(csv_name) as csv:
df=pd.read_csv(csv)
df['TIME']=df['DATE'].astype(str)+' '+df['TIME']
df['genesis']=df['DATE'].astype(str) + ' ' + CONST_BEGINTIME #begin time
df['judgement']=df['DATE'].astype(str) + ' ' + CONST_ENDTIME #end time
df['TIME']=pd.to_datetime(df['TIME'],format='%Y%m%d %H:%M:%S')
df['genesis']=pd.to_datetime(df['genesis'],format='%Y%m%d %H:%M:%S')
df['judgement']=pd.to_datetime(df['judgement'],format='%Y%m%d %H:%M:%S')
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# an all-masked float frame becomes all-NaN, so it never compares equal to itself
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
"""DataFrameToMatrix: Class that converts a DataFrame to a Numpy Matrix (ndarray)"""
# Third Party
import pandas as pd
import numpy as np
class DataFrameToMatrix():
"""DataFrameToMatrix: Class that converts a DataFrame to a Numpy Matrix (ndarray)
Notes:
fit_transform: Does a fit and a transform and returns the transformed matrix
transform: Based on previous fit parameters returns the transformed matrix
More Info: https://supercowpowers.github.io/zat/dataframe_to_matrix.html
# Nullable integer arrays are currently not handled by Numpy
# Cast Nullable integer arrays to float32
null_int_types = [pd.UInt16Dtype, pd.UInt32Dtype, pd.UInt64Dtype, pd.Int64Dtype]
for col in _internal_df:
if type(_internal_df[col].dtype) in null_int_types:
_internal_df[col] = _internal_df[col].astype(np.float32)
"""
def __init__(self):
"""Initialize the DataFrameToMatrix class"""
self.column_names = None
self.norm_map = {}
self.dtype_map = {}
self.nan_replace = -999
def fit_transform(self, input_df, normalize=True, nan_replace=-999, copy=True):
"""Convert the dataframe to a matrix (numpy ndarray)
Args:
input_df (dataframe): The dataframe to convert
normalize (bool): Boolean flag to normalize numeric columns (default=True)
"""
self.nan_replace = nan_replace
# Copy the dataframe (if wanted)
_internal_df = input_df.copy() if copy else input_df
# Convert object columns to categorical
self.object_to_categorical(_internal_df)
# Convert categorical NaNs
self.fit_category_nans(_internal_df)
# Lock categories to specific values (important for train/predict consistency)
self.lock_categorical(_internal_df)
# Sanity Check
self.sanity_check_categorical(_internal_df)
# Normalize numeric columns (mean normalize, sometimes called 'standardizing')
if normalize:
self.normalize_numeric(_internal_df)
# Remove any numerical NaNs (categorical NaNs were removed above)
for column in _internal_df.select_dtypes(include='number').columns:
_internal_df[column].fillna(self.nan_replace, inplace=True)
# Drop any columns that aren't numeric or categorical
for column in list(_internal_df.select_dtypes(exclude=['number', 'category']).columns):
print('Dropping {:s} column...'.format(column))
_internal_df = _internal_df.select_dtypes(include=['number', 'category'])
# Capture all the column/dtype information from the dataframe
self.column_names = _internal_df.columns.to_list()
for column in _internal_df.columns:
self.dtype_map[column] = _internal_df[column].dtype
# Now with every thing setup, call the dummy_encoder, convert to ndarray and return
return pd.get_dummies(_internal_df).to_numpy(dtype=np.float32)
def transform(self, input_df, copy=True):
"""Convert the dataframe to a matrix (numpy ndarray)
Args:
input_df (dataframe): The dataframe to convert
"""
# Make sure we have the same columns in the right order
# Copy the dataframe (if wanted)
_internal_df = input_df[self.column_names].copy() if copy else input_df[self.column_names]
# Convert all columns to the proper dtypes
for column, dtype in self.dtype_map.items():
_internal_df[column] = _internal_df[column].astype(dtype)
# Convert any categorical NaNs to a 'NaN' category
self.transform_category_nans(_internal_df)
# Normalize any numeric columns
for column, (smin, smax) in self.norm_map.items():
print('Normalizing column {:s}...'.format(column))
_internal_df[column] = (_internal_df[column] - smin) / (smax - smin)
# Remove any numerical NaNs (categorical NaNs were removed above)
for column in _internal_df.select_dtypes(include='number').columns:
_internal_df[column].fillna(self.nan_replace, inplace=True)
# Now with every thing setup, call the dummy_encoder, convert to ndarray and return
return pd.get_dummies(_internal_df).to_numpy(dtype=np.float32)
@staticmethod
def fit_category_nans(df):
"""ONLY FIT: Convert np.NaNs to a category 'NaN'"""
for column in df.select_dtypes(include=['category']).columns:
if df[column].isnull().any():
df[column].cat.add_categories('NaN', inplace=True)
df[column].fillna('NaN', inplace=True)
@staticmethod
def transform_category_nans(df):
"""ONLY TRANSFORM: Convert np.NaNs to a category 'NaN'"""
for column in df.select_dtypes(include=['category']).columns:
if 'NaN' in df[column].cat.categories:
df[column].fillna('NaN', inplace=True)
@staticmethod
def object_to_categorical(df):
"""Run a heuristic on the object columns to determine whether it contains categorical values
if the heuristic decides it's categorical then the type of the column is changed
Args:
df (dataframe): The dataframe to check for categorical data
Returns:
None but the dataframe columns are modified
"""
# Loop through each column that might be converable to categorical
for column in df.select_dtypes(include='object').columns:
# If we don't have too many unique values convert the column
if df[column].nunique() < 100:
print('Changing column {:s} to category...'.format(column))
df[column] = pd.Categorical(df[column])
@staticmethod
def lock_categorical(df):
"""Lock the categorical column types to a specific ordered list of categories
Args:
df (dataframe): The dataframe to lock categorical columns
Returns:
None but note that the dataframe is modified to 'lock' the categorical columns
"""
for column in df.select_dtypes(include='category').columns:
df[column] = pd.Categorical(df[column], categories=sorted(df[column].unique().tolist()))
@staticmethod
def sanity_check_categorical(df):
"""Sanity check for 'dimensionality explosion' on categorical types
Args:
df (dataframe): The dataframe to check the categorical columns
Returns:
None
"""
for column in df.select_dtypes(include='category').columns:
# Give warning on category types will LOTs of values
num_unique = df[column].nunique()
if num_unique > 20:
print('WARNING: {:s} will expand into {:d} dimensions...'.format(column, num_unique))
def normalize_numeric(self, df):
"""Normalize (mean normalize) the numeric columns in the dataframe
Args:
df (dataframe): The dataframe to normalize
Returns:
None but note that the numeric columns of the dataframe are modified
"""
for column in df.select_dtypes(include='number').columns:
print('Normalizing column {:s}...'.format(column))
df[column] = self._normalize_series(df[column])
def _normalize_series(self, series):
smin = series.min()
smax = series.max()
# Check for div by 0
if smax - smin == 0:
print('Cannot normalize series (div by 0) so not normalizing...')
return series
# Capture the normalization info and return the normalize series
self.norm_map[series.name] = (smin, smax)
return (series - smin) / (smax - smin)
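
# Hedged usage sketch (added for illustration; not part of the original module). It shows the
# intended fit-then-transform pairing: fit_transform() learns categories and per-column
# normalization on a training frame, transform() reuses them on new rows. The column names and
# values below are made-up assumptions, not data shipped with this repository.
def _example_fit_then_transform():
    """Run DataFrameToMatrix on a tiny hand-made frame and a single new row."""
    train_df = pd.DataFrame({'proto': ['tcp', 'udp', 'tcp'], 'bytes': [100.0, 250.0, np.nan]})
    new_df = pd.DataFrame({'proto': ['udp'], 'bytes': [175.0]})
    to_matrix = DataFrameToMatrix()
    train_matrix = to_matrix.fit_transform(train_df)   # learns categories + (min, max) per numeric column
    new_matrix = to_matrix.transform(new_df)           # applies the same encoding to unseen rows
    return train_matrix, new_matrix
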
# Simple test of the functionality
def test():
"""Test for DataFrameToMatrix Class"""
import os
import pickle
from tempfile import NamedTemporaryFile
import numpy.testing as np_test_utils
| pd.set_option('display.width', 1000) | pandas.set_option |
import numpy as np
import pandas as pd
import os
DATADIR='/tigress/BEE/penn-covidsub/'
FEATURE_DICT = {'ALBUMIN': ['ALBUMIN (CCH)', 'C ALBUMIN'],
'CREATININE': ['C CREATININE', 'CREATININE (CCH)', 'ISTAT CREATININE'],
'URINE': ['URINE TOTAL VOLUME'],
'LACTIC ACID': ['LACTIC ACID'],
                'PO2 ART': ['PO2 ART', 'APO2 POC', 'C PO2 ARTERIAL'],
                'FIO2': ['C FIO2 ARTERIAL', 'FIO2 POC', 'C %FIO2', '%FIO2'],
'ANION GAP': ['ANION GAP'],
'DDIMER': ['C D-DIMER'],
'AO2CT': ['AO2CT'],
'CO2': ['APCO2 POC'],
'CARBOXYHEMOGLOBIN': ['CARBOXYHEMOGLOBIN'],
'METHEMOGLOBIN': ['METHEMOGLOBIN', 'C METHEMOGLOBIN'],
'CHLORIDE': ['C CHLORIDE', 'C CHLORIDE ART', 'CHLORIDE'],
'INR': ['C INR (INTERNATIONAL NORMALIZED RATIO)'],
'PH': ['PH ART', 'A PH POC', 'C PH ARTERIAL', 'ARTERIAL PH (CCH)'],
'HEMOGLOBIN': [ 'C HEMOGLOBIN'],
'POTASSIUM': ['C POTASSIUM', 'C POTASSIUM ART'],
'SODIUM': ['C SODIUM', 'C SODIUM ART', 'C SODIUM (ABG)', 'SODIUM (CCH)'],
'PLATELETS': ['C PLATELETS', 'PLATELET CNT (CCH)'],
'LACTATE': ['C LACTATE DEHYDROGENAS', 'C LACTATE POC', 'LACTATE (CCH)'],
'BILIRUBIN': ['C BILIRUBIN, TOTAL'],
}
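
# Hedged convenience mapping (added for illustration; not in the original file): invert
# FEATURE_DICT so a raw lab name can be looked up back to its canonical feature, e.g.
# ALIAS_TO_FEATURE['C ALBUMIN'] -> 'ALBUMIN'. Assumes aliases are unique across features.
ALIAS_TO_FEATURE = {alias: feature
                    for feature, aliases in FEATURE_DICT.items()
                    for alias in aliases}
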
def load_feature(
feature,
n_pts_per_attribute=30,
model_type='gpr',
min_trajectory_len=25,
max_trajectory_len=80,
train_test_split=0.75,
cohort_name='COVID - Cohort v2.csv',
):
cohort = pd.read_csv(DATADIR + cohort_name)
# Dictionary of pt_id to race
id_to_race_dict = dict()
id_to_icu_dict = dict()
id_to_vent_dict = dict()
id_to_sex_dict = dict()
for index, row in cohort.iterrows():
id_to_race_dict[row['pat_id']] = row['pt_race']
id_to_icu_dict[row['pat_id']] = row['icu_any']
id_to_vent_dict[row['pat_id']] = row['vent_any']
id_to_sex_dict[row['pat_id']] = row['pt_sex']
datadir='/tigress/BEE/penn-covidsub/'
data_df = pd.read_csv(os.path.join(DATADIR, 'COVID - Labs v2.csv'))
data_df['order_time'] = | pd.to_datetime(data_df['order_time']) | pandas.to_datetime |
"""
conjoin_tables.py.
Bring together two tables:
- Reading times by subject by token
- Surprisal by token
This should be the final step before R analysis.
Ideally, this process would be included in the R analysis to lower the number
of steps needed to get data visualizations, but this Python script will fill
that role for now.
<NAME>
"""
import argparse
from functools import cache
import pandas as pd
parser = argparse.ArgumentParser()
DATADIR = '../data'
parser.add_argument('--id_file', default=f'{DATADIR}/ids.tsv')
parser.add_argument('--rnng_file',
default=f'{DATADIR}/naturalstories_rnng.output')
parser.add_argument('--rts_file', default=f'{DATADIR}/processed_RTs.tsv')
parser.add_argument('--lstm_file')
parser.add_argument('--save_file', default=f'{DATADIR}/final.csv')
def get_rts(rts_file) -> pd.DataFrame:
"""Load reading times from a file.
Returns a pandas Dataframe with the following columns:
* worker_id (str) - Unique identifier for the reader.
* work_time_total (int) - Total time the reader took.
* story (int) - Story index.
* story_pos (int) - Token index.
* rt (int) - Reading time in milliseconds.
"""
df = pd.read_csv(rts_file, sep='\t', header=0)
return df
def cut_malformed(surps: pd.DataFrame, ids: pd.DataFrame):
"""Remove any trees that had an issue in syntactic parsing.
Returns a FUNCTION that takes a token item and zone, as well as a column,
and returns the sum of that column for that token.
"""
valid = []
failed = []
for surp, ind in zip(surps.groupby(['sent']), ids.groupby(['sent'])):
if len(surp[1]) == len(ind[1]):
row = pd.merge_ordered(surp[1], ind[1], left_on="sent_pos",
right_on="sent_pos")
row = row.drop(
columns=['index', 'component', 'sent', 'sent_pos'])
valid.append(row)
else:
failed.append(surp[0])
if failed:
print(f"The following {str(len(failed))} sentences failed:")
print(' '.join(map(str, failed)))
data = pd.concat(valid)
@cache
def func(story, story_pos, colname):
return data.loc[
(data['story'] == story) & (data['story_pos'] == story_pos)] \
.sum()[colname]
return func
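
# Hedged mini-example (added for illustration; not in the original script) of the pattern above:
# build a merged frame once, then memoize point lookups keyed by (story, story_pos, column)
# with functools.cache. The numbers are fabricated.
def _example_cached_lookup() -> float:
    data = pd.DataFrame({'story': [1, 1], 'story_pos': [1, 1], 'surprisal': [0.5, 0.25]})

    @cache
    def lookup(story: int, story_pos: int, colname: str) -> float:
        mask = (data['story'] == story) & (data['story_pos'] == story_pos)
        return data.loc[mask].sum()[colname]

    return lookup(1, 1, 'surprisal')  # 0.75: both rows for token (1, 1) are summed
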
def main(args):
"""Cut out bad syntax trees and merge with reading times."""
ids = | pd.read_csv(args.id_file, sep='\t') | pandas.read_csv |
import os
import glob
import datetime
import numpy as np
import pandas as pd
from tqdm import tqdm
from typing import List, Tuple, Callable
from climate_resilience import utils
import warnings
warnings.formatwarning = utils.warning_format
def calculate_Nth_percentile(
sites: pd.DataFrame,
scenarios: List[str],
variables: List[str],
datadir: str,
N: int=99,
) -> None:
"""Calculates the Nth percentile.
Args:
sites (pd.DataFrame): Data Frame containing all the site information.
scenarios (List[str]): Scenarios of interest.
variables (List[str]): Variables of interest.
datadir (str): Parent directory containing all the data files.
The generated output file is also stored here.
N (int): Nth percentile will be calculated.
Returns:
pd.DataFrame: The output DataFrame that is written to a csv file is also returned.
Raises:
ValueError: If the integer value of N is outside the range [0, 100].
"""
# Verify the value of N
if N < 0 or N > 100:
raise ValueError("Incorrect value for N. N must be between 0 and 100.")
# Declare variables that will be used to convert the processed data to a DataFrame
df_array = []
df_colnames = []
# Loop over all the sites.
# ID and Object ID are stored only to inspect the final result with the corresponding site
for _oid, _id, name, state in zip(sites.OBJECTID, sites.ID, sites.NameMnemonic, sites.StateCode):
array_ind = [_oid, _id, name, state]
df_colnames = ["OBJECTID", "ID", "NameMnemonic", "StateCode"]
# Iterate over all combinations of variables and scenarios
for sce in scenarios:
for var in variables:
csv_path = os.path.join(datadir,
f"{sce}_{var}_ensemble",
f"{name}_{state}_{sce}_{var}.csv")
if not os.path.exists(csv_path):
print(f"WARNING: {csv_path} does not exist. Continuing to the next file.")
continue
# Preprocessing step
df = pd.read_csv(csv_path)
df1 = df.set_index('date')
mean_val = np.percentile(df1['mean'], N)
# Update the column names
colname = f"{sce}_{var}_percentile"
if colname not in df_colnames:
df_colnames.append(colname)
# Store the row information
array_ind.append(mean_val)
# Store the row for conversion to DataFrame
df_array.append(array_ind)
# Convert the generated data to a DataFrame
df_pr = pd.DataFrame(df_array)
df_pr.columns = df_colnames
# Merge the generated data with the original Data Frame
df_pr = pd.merge(sites, df_pr,
how="inner",
left_on=["OBJECTID", "ID"],
right_on=["OBJECTID", "ID"],
suffixes=(None, "_copy"),
)
# Write to CSV
output_csv_path = os.path.join(datadir, f"LMsites_{N}th_percentile.csv")
df_pr.to_csv(output_csv_path)
print(f"STATUS UPDATE: The output file generated from calculate_Nth_percentile() function is stored as {output_csv_path}.")
return df_pr
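
# Hedged illustration (added for clarity; not in the original module): the per-file reduction
# above is simply an Nth percentile over the 'mean' column of one
# "<name>_<state>_<scenario>_<variable>.csv". The toy frame below stands in for such a file.
def _example_percentile_reduction(N: int = 99) -> float:
    toy = pd.DataFrame({
        "date": pd.date_range("2000-01-01", periods=5, freq="D"),
        "mean": [1.0, 2.0, 3.0, 4.0, 100.0],
    }).set_index("date")
    return float(np.percentile(toy["mean"], N))  # heavily influenced by the single extreme day
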
def calculate_pr_count_amount(
sites: pd.DataFrame,
scenarios: List[str],
variables: List[str],
datadir: str,
df_pr_csv_path: str
) -> None:
"""Calculates precipitation count and amount.
Args:
sites (pd.DataFrame): Data Frame containing all the site information.
scenarios (List[str]): Scenarios of interest.
variables (List[str]): Variables of interest.
datadir (str): Parent directory containing all the data files.
The generated output file is also stored here.
df_pr_csv_path (str): This data frame can be generated using the calculate_Nth_percentile() function.
The csv file generated from this function is passed here as argument.
Returns:
pd.DataFrame: The output DataFrame that is written to a csv file is also returned.
Raises:
KeyError: This error is raised if the correct historical column does not
exist in the df_pr data frame that is mentioned in df_pr_csv_path.
"""
nyr_hist = 56 # QUESTION: fixed values or random values for experiment?
nyr_proj = 93 # QUESTION: fixed values or random values for experiment?
# df_pr is required to calculate counts and amounts greater than 'historical' values
df_pr = pd.read_csv(df_pr_csv_path)
# Declare variables that will be used to convert the processed data to a DataFrame
df_array = []
df_colnames = []
# Loop over all the sites.
# ID and Object ID are stored only to inspect the final result with the corresponding site
i=0
for _oid, _id, name, state in zip(sites.OBJECTID, sites.ID, sites.NameMnemonic, sites.StateCode):
array_ind = [_oid, _id, name, state]
df_colnames = ["OBJECTID", "ID", "NameMnemonic", "StateCode"]
# Iterate over all combinations of variables and scenarios
for sce in scenarios:
for var in variables:
# Verify if the column required for counts and amounts calculation is present in the df_pr DataFrame.
historical_col_name = f"historical_{var}_percentile"
if historical_col_name not in df_pr:
raise KeyError(f"{historical_col_name} column does not exist in the percentile data frame. Check the df_pr_csv_path argument.")
csv_path = os.path.join(datadir,
f"{sce}_{var}_ensemble",
f"{name}_{state}_{sce}_{var}.csv")
if not os.path.exists(csv_path):
print(f"WARNING: {csv_path} does not exist. Continuing to the next file.")
continue
# Preprocessing step
df = pd.read_csv(csv_path)
df1 = df.set_index('date')
div_const = nyr_hist if sce == "historical" else nyr_proj
count = np.count_nonzero(df1['mean'] > df_pr[historical_col_name].iloc[i]) / div_const
amount = np.mean(df1[df1['mean'] > df_pr[historical_col_name].iloc[i]]['mean']) / div_const
# Update the column names and store the row information
colname = f"{sce}_{var}_counts"
if colname not in df_colnames:
df_colnames.append(colname)
array_ind.append(count)
colname = f"{sce}_{var}_amount"
if colname not in df_colnames:
df_colnames.append(colname)
array_ind.append(amount)
# Store the row for conversion to DataFrame
df_array.append(array_ind)
i+=1
# Convert the generated data to a DataFrame
df_pr_counts_amounts = pd.DataFrame(df_array)
df_pr_counts_amounts.columns = df_colnames
# Merge the generated data with the original Data Frame
df_pr = pd.merge(sites, df_pr_counts_amounts,
how="inner",
left_on=["OBJECTID", "ID"],
right_on=["OBJECTID", "ID"],
suffixes=(None, "_copy"),
)
# Write to CSV
output_csv_path = os.path.join(datadir, "LMsites_counts_amounts.csv")
df_pr_counts_amounts.to_csv(output_csv_path)
print(f"STATUS UPDATE: The output file generated from calculate_pr_count_amount() function is stored as {output_csv_path}.")
return df_pr_counts_amounts
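
# Hedged illustration (added for clarity; not in the original module): per site, "counts" is how
# often the daily mean exceeds the historical percentile, and "amount" is the mean of those
# exceedances; both are scaled by the number of years. Threshold and values are fabricated.
def _example_counts_amounts(threshold: float = 3.5, n_years: int = 2):
    daily_mean = pd.Series([1.0, 2.0, 4.0, 5.0, 6.0])
    count = np.count_nonzero(daily_mean > threshold) / n_years      # 3 exceedances / 2 years = 1.5
    amount = np.mean(daily_mean[daily_mean > threshold]) / n_years  # mean(4, 5, 6) / 2 = 2.5
    return count, amount
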
def calculate_temporal_mean(
sites: pd.DataFrame,
scenarios: List[str],
variables: List[str],
datadir: str,
start_date: str,
end_date: str
) -> None:
"""Calculates mean precipitation for the 'historical' scenario or
between the start_date and the end_date.
Args:
sites (pd.DataFrame): Data Frame containing all the site information.
scenarios (List[str]): Scenarios of interest.
variables (List[str]): Variables of interest.
datadir (str): Parent directory containing all the data files.
The generated output file is also stored here.
start_date (str): Must be in the format 'YYYY-MM' or 'YYYY-MM-DD'.
end_date (str): Must be in the format 'YYYY-MM' or 'YYYY-MM-DD'.
Returns:
pd.DataFrame: The output DataFrame that is written to a csv file is also returned.
"""
# Declare variables that will be used to convert the processed data to a DataFrame
df_array = []
df_colnames = []
# Loop over all the sites.
# ID and Object ID are stored only to inspect the final result with the corresponding site
for _oid, _id, name, state in zip(sites.OBJECTID, sites.ID, sites.NameMnemonic, sites.StateCode):
array_ind = [_oid, _id, name, state]
df_colnames = ["OBJECTID", "ID", "NameMnemonic", "StateCode"]
# Iterate over all combinations of variables and scenarios
for sce in scenarios:
for var in variables:
csv_path = os.path.join(datadir,
f"{sce}_{var}_ensemble",
f"{name}_{state}_{sce}_{var}.csv")
if not os.path.exists(csv_path):
print(f"WARNING: {csv_path} does not exist. Continuing to the next file.")
continue
# Preprocessing step
df = pd.read_csv(csv_path)
df1 = df.set_index('date')
# 'historial' scenario dates from 1950 to 2006.
if sce != 'historical':
c0 = df1.index.to_series().between(start_date, end_date)
df2 = df1[c0]
mean_val = np.mean(df2['mean'])
# Generate column names
colname = f"{start_date}_{end_date}_{var}_mean"
else:
mean_val = np.mean(df1['mean'])
# Generate column names
colname = f"{sce}_{var}_mean"
# Update the column names
if colname not in df_colnames:
df_colnames.append(colname)
# Store the row information
array_ind.append(mean_val)
# Store the row for conversion to DataFrame
df_array.append(array_ind)
# Convert the generated data to a DataFrame
df_pr = pd.DataFrame(df_array)
df_pr.columns = df_colnames
# Merge the generated data with the original Data Frame
df_pr = pd.merge(sites, df_pr,
how="inner",
left_on=["OBJECTID", "ID"],
right_on=["OBJECTID", "ID"],
suffixes=(None, "_copy"),
)
# Write to CSV
output_csv_path = os.path.join(datadir, "LMsites_seg.csv")
df_pr.to_csv(output_csv_path)
print(f"STATUS UPDATE: The output file generated from calculate_temporal_mean() function is stored as {output_csv_path}.")
return df_pr
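
# Hedged illustration (added for clarity; not in the original module): for projected scenarios the
# function restricts the (string) date index to [start_date, end_date] before averaging. Dates and
# values below are arbitrary.
def _example_windowed_mean() -> float:
    df1 = pd.DataFrame(
        {"mean": [1.0, 2.0, 3.0, 4.0]},
        index=pd.date_range("2050-01-01", periods=4, freq="D").strftime("%Y-%m-%d"),
    )
    in_window = df1.index.to_series().between("2050-01-02", "2050-01-03")
    return float(np.mean(df1[in_window]["mean"]))  # mean of the two days inside the window -> 2.5
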
def get_climate_ensemble(
sites: pd.DataFrame,
scenarios: List[str],
variables: List[str],
datadir: str,
) -> None:
"""Calculates the mean and std of data for each site.
Args:
sites (pd.DataFrame): Data Frame containing all the site information.
scenarios (List[str]): Scenarios of interest.
variables (List[str]): Variables of interest.
datadir (str): Parent directory containing all the data files.
The generated output file is also stored here.
"""
# Create the output directory where the generated CSVs will be stored
output_dir = os.path.join(datadir, "climate_ensemble")
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# Iterating over all sites
name_state_list = list(zip(sites.NameMnemonic, sites.StateCode))
with tqdm(name_state_list) as tqdm_name_state_list:
tqdm_name_state_list.set_description("LM Sites")
for name, state in tqdm_name_state_list:
# Iterating over all combinations of scenarios and variables
for scenario in scenarios:
for variable in variables:
filepath_format = os.path.join(datadir, f"{scenario}_{variable}", f"{name}_{state}*.csv")
all_files = glob.glob(filepath_format)
# Iterating over all_files to create a single data frame of mean values of all the models
for i, filename in enumerate(all_files):
if i == 0:
df = pd.read_csv(filename, index_col=None, header=0)
else:
df[str(i)] = pd.read_csv(filename, index_col=None, header=0).iloc[:, 1]
# Creating a new data frame that contains the ensemble mean and std values
df2 = pd.DataFrame()
start_date = datetime.date(1950, 1, 1) # TODO: Ideally this should be read from the CSV file but the date in the CSV file seems incorrect.
end_date = start_date + datetime.timedelta(days=len(df)-1) # TODO: Ideally this should be read from the CSV file but the date in the CSV file seems incorrect.
df2["date"] = pd.date_range(start_date, end_date)
df2["mean"] = df.mean(axis=1, numeric_only=True) # avoids the date column
df2["std"] = df.std(axis=1, numeric_only=True) # avoids the date column
output_csv_path = os.path.join(output_dir, f"{name}_{state}_{scenario}_{variable}.csv")
df2.to_csv(output_csv_path)
# print(f"STATUS UPDATE: The output file is stored as {output_csv_path}.")
print(f"STATUS UPDATE: The CSVs generated from get_climate_ensemble() function are stored in the '{output_dir}' directory.")
def get_per_year_stats(
sites: pd.DataFrame,
scenarios: List[str],
variables: List[str],
datadir: str,
) -> None:
"""Calculates the year-wise max, mean, and std of data for each site.
Args:
sites (pd.DataFrame): Data Frame containing all the site information.
scenarios (List[str]): Scenarios of interest.
variables (List[str]): Variables of interest.
datadir (str): Parent directory containing all the data files.
The generated output file is also stored here.
"""
# Create the output directory where the generated CSVs will be stored
output_dir = os.path.join(datadir, "per_year_stats")
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
else:
warnings.warn(f"{output_dir} already exists! The generated CSVs will be added or overwritten in this directory.")
# Iterating over all sites
name_state_list = list(zip(sites.NameMnemonic, sites.StateCode))
with tqdm(name_state_list) as tqdm_name_state_list:
tqdm_name_state_list.set_description("LM Sites")
for name, state in tqdm_name_state_list:
df_array = []
# Iterating over all combinations of scenarios and variables and
# concating data for all combinations in a single data frame
df = pd.DataFrame()
for sce in scenarios:
for var in variables:
csv_path = os.path.join(datadir, f"{sce}_{var}_ensemble", f"{name}_{state}_{sce}_{var}.csv")
df_i = pd.read_csv(csv_path)
df_i = df_i.set_index("date")
if df.empty:
df = df_i
else:
df = | pd.concat([df, df_i]) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Scientific Computing and Visualization with Spyder
Created on Thu May 20 10:17:27 2021
@author: <NAME>
"""
# %% Import libraries
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
import seaborn as sns
# %% Load raw data (parquet)
data = pd.read_parquet("parsed_data_public.parquet")
# %% Let's explore age
print(data.d_age.describe())
age = data.d_age.tolist()
# %% Save some variables and display them in the Variable Explorer
max_age = data.d_age.max()
min_age = data.d_age.min()
# %% Plot age with pandas
data.d_age.plot.hist(bins=25, alpha=0.5)
# %% Plot age with seaborn (and search for help from IPython Console)
sns.histplot(data.d_age, kde=True, bins=25)
plt.show()
# %% Plot age and mean
sns.histplot(data.d_age, kde=True, bins=25)
plt.xlabel('Age')
plt.axvline(data.d_age.mean(), color='k', linestyle='dashed', linewidth=1)
min_ylim, max_ylim = plt.ylim()
plt.text(data.d_age.mean()*1.1, max_ylim*0.9,
'Mean: {:.2f}'.format(data.d_age.mean()))
plt.show()
# %% Demographic variables list
demograph = [v for v in list(data.columns) if v.startswith("d_")]
# %% Cognitive ability questions
# Select the questions for the cognitive ability test (14 questions).
# Add the correct answers in a new column.
test_items = | pd.read_csv("test_items.csv") | pandas.read_csv |
"""
Download data from IB using the "IB loop" approach (ie starting from the end of
the interval and moving backwards).
"""
import datetime
import logging
import os
from typing import Any, Iterator, List, Optional, Tuple, Union
try:
import ib_insync
except ModuleNotFoundError:
print("Can't find ib_insync")
import pandas as pd
# from tqdm.notebook import tqdm
from tqdm import tqdm
import helpers.dbg as dbg
import helpers.s3 as hs3
import helpers.io_ as hio
import im.ib.data.extract.gateway.utils as videgu
_LOG = logging.getLogger(__name__)
# TODO(*): -> _ib_loop_generator?
def ib_loop_generator(
ib: ib_insync.ib.IB,
contract: ib_insync.Contract,
    start_ts: datetime.datetime,
    end_ts: datetime.datetime,
duration_str: str,
bar_size_setting: str,
what_to_show: str,
use_rth: bool,
use_progress_bar: bool = False,
num_retry: Optional[Any] = None,
) -> Iterator[
Union[
Iterator,
Iterator[
Tuple[int, pd.DataFrame, Tuple[datetime.datetime, pd.Timestamp]]
],
Iterator[Tuple[int, pd.DataFrame, Tuple[pd.Timestamp, pd.Timestamp]]],
]
]:
"""
Get historical data using the IB style of looping for [start_ts, end_ts).
The IB loop style consists in starting from the end of the interval
and then using the earliest value returned to move the window
backwards in time. The problem with this approach is that one can't
parallelize the requests of chunks of data.
"""
videgu.check_ib_connected(ib)
_LOG.debug("start_ts='%s' end_ts='%s'", start_ts, end_ts)
start_ts = videgu.to_ET(start_ts)
end_ts = videgu.to_ET(end_ts)
_LOG.debug("start_ts='%s' end_ts='%s'", start_ts, end_ts)
dbg.dassert_lt(start_ts, end_ts)
# Let's start from the end.
curr_ts = end_ts
pbar = None
i = 0
_LOG.debug("start_ts='%s' end_ts='%s'", start_ts, end_ts)
ts_seq = None
start_ts_reached = False
while not start_ts_reached:
_LOG.debug("Requesting data for curr_ts='%s'", curr_ts)
df = videgu.req_historical_data(
ib,
contract,
curr_ts,
duration_str,
bar_size_setting,
what_to_show,
use_rth,
num_retry=num_retry,
)
        if df is None:
            # TODO(gp): Sometimes IB returns an empty df in a chunk although there
            # is more data later on. Maybe we can just keep going.
            return
_LOG.debug("df=%s\n%s", videgu.get_df_signature(df), df.head(3))
date_offset = videgu.duration_str_to_pd_dateoffset(duration_str)
if df.empty:
# Sometimes IB returns an empty df in a chunk although there is more
# data later on: we keep going.
next_curr_ts = curr_ts - date_offset
_LOG.debug("Empty df -> curr_ts=%s", curr_ts)
else:
# Move the curr_ts to the beginning of the chuck.
next_curr_ts = videgu.to_ET(df.index[0])
# Avoid infinite loop if there is only one record in response.
if next_curr_ts == curr_ts:
next_curr_ts -= date_offset
ts_seq = (curr_ts, next_curr_ts)
curr_ts = next_curr_ts
_LOG.debug("curr_ts='%s'", curr_ts)
if i == 0:
# Create the progress bar.
total = (end_ts - start_ts).days
if use_progress_bar:
pbar = tqdm(total=total, desc=contract.symbol)
if pbar is not None:
idx = (end_ts - curr_ts).days
_LOG.debug("idx=%s, total=%s", idx, pbar.total)
pbar.n = idx
pbar.refresh()
# We insert at the beginning since we are walking backwards the interval.
if start_ts != "" and curr_ts <= start_ts:
_LOG.debug(
"Reached the beginning of the interval: "
"curr_ts=%s start_ts=%s",
curr_ts,
start_ts,
)
df = videgu.truncate(df, start_ts=start_ts, end_ts=end_ts)
start_ts_reached = True
if not df.empty:
yield i, df, ts_seq
i += 1
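
# Hedged sketch (added for illustration; no broker connection involved): the "IB loop" walks the
# requested interval backwards, starting at end_ts and prepending each chunk until start_ts is
# covered. `chunk` stands in for what duration_str controls in the real generator above.
def _example_backward_window_walk(start_ts: pd.Timestamp, end_ts: pd.Timestamp,
                                  chunk: pd.Timedelta) -> List[Tuple[pd.Timestamp, pd.Timestamp]]:
    windows: List[Tuple[pd.Timestamp, pd.Timestamp]] = []
    curr_ts = end_ts
    while curr_ts > start_ts:
        prev_ts = max(curr_ts - chunk, start_ts)
        windows.insert(0, (prev_ts, curr_ts))  # insert at the front: we are walking backwards
        curr_ts = prev_ts
    return windows
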
def save_historical_data_by_intervals_IB_loop(
ib: int,
contract: ib_insync.Contract,
start_ts: pd.Timestamp,
end_ts: pd.Timestamp,
duration_str: str,
bar_size_setting: str,
what_to_show: str,
use_rth: bool,
file_name: str,
part_files_dir: str,
incremental: bool,
use_progress_bar: bool = True,
num_retry: Optional[Any] = None,
) -> List[Tuple[pd.Timestamp, pd.Timestamp]]:
"""
Save historical data into multiple files into `contract.symbol` directory
near the `file_name`.
:param incremental: if the `file_name` already exists, resume downloading
from the last date
"""
start_ts, end_ts = videgu.process_start_end_ts(start_ts, end_ts)
#
ib, deallocate_ib = videgu.allocate_ib(ib)
_LOG.debug("ib=%s", ib)
generator = ib_loop_generator(
ib,
contract,
start_ts,
end_ts,
duration_str,
bar_size_setting,
what_to_show,
use_rth,
use_progress_bar=use_progress_bar,
num_retry=num_retry,
)
saved_intervals = set()
for i, df_tmp, _ in generator:
# Split data by static intervals.
for interval, df_tmp_part in videgu.split_data_by_intervals(
df_tmp, videgu.duration_str_to_pd_dateoffset(duration_str)
):
# Get file name for each part.
file_name_for_part = historical_data_to_filename(
contract=contract,
start_ts=interval[0],
end_ts=interval[1],
duration_str=duration_str,
bar_size_setting=bar_size_setting,
what_to_show=what_to_show,
use_rth=use_rth,
dst_dir=part_files_dir,
)
# There can be already data from previous loop iteration.
if videgu.check_file_exists(file_name_for_part):
df_to_write = pd.concat(
[df_tmp_part, load_historical_data(file_name_for_part)]
)
else:
# First iteration ever.
df_to_write = df_tmp_part
# Force to have index `pd.Timestamp` format.
df_to_write.index = df_to_write.index.map(videgu.to_ET)
if incremental:
# It is possible that same data was already loaded.
df_to_write = df_to_write[
~df_to_write.index.duplicated(keep="last")
]
df_to_write.sort_index(inplace=True)
dbg.dassert_monotonic_index(
df_to_write,
"Most likely the data for selected interval already exists, try incremental mode.",
)
# We appended data at step before, so re-write the file.
df_to_write.to_csv(file_name_for_part, mode="w", header=True)
_LOG.info("Saved partial data in '%s'", file_name_for_part)
saved_intervals.add(interval)
videgu.deallocate_ib(ib, deallocate_ib)
return saved_intervals
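
# Hedged illustration (added for clarity; not in the original module) of the incremental merge
# above: the freshly downloaded part is concatenated in front of the already-saved rows, so
# keep="last" preserves the previously saved copy for overlapping timestamps.
def _example_incremental_merge() -> pd.DataFrame:
    new_part = pd.DataFrame({"close": [2.5, 3.0]}, index=pd.to_datetime(["2021-01-02", "2021-01-03"]))
    saved = pd.DataFrame({"close": [1.0, 2.0]}, index=pd.to_datetime(["2021-01-01", "2021-01-02"]))
    merged = pd.concat([new_part, saved])
    merged = merged[~merged.index.duplicated(keep="last")]  # 2021-01-02 keeps the saved value 2.0
    return merged.sort_index()
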
def get_historical_data_with_IB_loop(
ib: ib_insync.ib.IB,
contract: ib_insync.Contract,
start_ts: pd.Timestamp,
end_ts: pd.Timestamp,
duration_str: str,
bar_size_setting: str,
what_to_show: str,
use_rth: bool,
use_progress_bar: bool = False,
return_ts_seq: bool = False,
num_retry: Optional[Any] = None,
) -> Tuple[
pd.DataFrame,
List[Tuple[Union[datetime.datetime, pd.Timestamp], pd.Timestamp]],
]:
"""
Get historical data using the IB style of looping for [start_ts, end_ts).
The IB loop style consists in starting from the end of the interval
and then using the earliest value returned to move the window
backwards in time. The problem with this approach is that one can't
parallelize the requests of chunks of data.
"""
start_ts, end_ts = videgu.process_start_end_ts(start_ts, end_ts)
#
dfs = []
ts_seq = []
ib, deallocate_ib = videgu.allocate_ib(ib)
generator = ib_loop_generator(
ib,
contract,
start_ts,
end_ts,
duration_str,
bar_size_setting,
what_to_show,
use_rth,
use_progress_bar=use_progress_bar,
num_retry=num_retry,
)
# Deallocate.
videgu.deallocate_ib(ib, deallocate_ib)
for i, df_tmp, ts_seq_tmp in generator:
ts_seq.append(ts_seq_tmp)
dfs.insert(0, df_tmp)
#
df = | pd.concat(dfs) | pandas.concat |
import os
import pandas as pd
import numpy as np
import json
import urllib.request
from datetime import datetime
# --------------------------------------------- Create contents ----------------------------------------------------- #
# 10:30 a.m. (Eastern Time) on Thursday. Delayed by one day if holiday.
def generate_html(today):
with urllib.request.urlopen("http://ir.eia.gov/ngs/wngsr.json") as url:
data = json.loads(url.read().decode('utf-8-sig'))
release_date = datetime.strptime(data['release_date'], '%Y-%b-%d %H:%M:%S')
if release_date.date() != today.date():
return None
df = pd.DataFrame.from_records(data['series'])
df_ng = pd.DataFrame()
df_ng['Region'] = df['name']
df_temp = pd.DataFrame(df.data.tolist(), index=df.index)
df_temp.columns = df_temp.iloc[0].apply(lambda x: x[0])
df_temp = df_temp.applymap(lambda x: x[1])
df_ng = | pd.concat([df_ng, df_temp], axis=1) | pandas.concat |
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Base data class.
Class `Data` allows storing, downloading, updating, and managing data. It stores data
as a dictionary of Series/DataFrames keyed by symbol, and makes sure that
all pandas objects have the same index and columns by aligning them.
## Downloading
Data can be downloaded by overriding the `Data.download_symbol` class method. What `Data` does
under the hood is iterating over all symbols and calling this method.
Let's create a simple data class `RandomData` that generates price based on
random returns with provided mean and standard deviation:
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> import vectorbt as vbt
>>> class RandomData(vbt.Data):
... @classmethod
... def download_symbol(cls, symbol, mean=0., stdev=0.1, start_value=100,
... start_dt='2021-01-01', end_dt='2021-01-10'):
... index = pd.date_range(start_dt, end_dt)
... rand_returns = np.random.normal(mean, stdev, size=len(index))
... rand_price = start_value + np.cumprod(rand_returns + 1)
... return pd.Series(rand_price, index=index)
>>> rand_data = RandomData.download(['RANDNX1', 'RANDNX2'])
>>> rand_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 101.042956 100.920462
2021-01-02 100.987327 100.956455
2021-01-03 101.022333 100.955128
2021-01-04 101.084243 100.791793
2021-01-05 101.158619 100.781000
2021-01-06 101.172688 100.786198
2021-01-07 101.311609 100.848192
2021-01-08 101.331841 100.861500
2021-01-09 101.440530 100.944935
2021-01-10 101.585689 100.993223
```
To provide different keyword arguments for different symbols, we can use `symbol_dict`:
```python-repl
>>> start_value = vbt.symbol_dict({'RANDNX2': 200})
>>> rand_data = RandomData.download(['RANDNX1', 'RANDNX2'], start_value=start_value)
>>> rand_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 101.083324 200.886078
2021-01-02 101.113405 200.791934
2021-01-03 101.169194 200.852877
2021-01-04 101.164033 200.820111
2021-01-05 101.326248 201.060448
2021-01-06 101.394482 200.876984
2021-01-07 101.494227 200.845519
2021-01-08 101.422012 200.963474
2021-01-09 101.493162 200.790369
2021-01-10 101.606052 200.752296
```
In case two symbols have different index or columns, they are automatically aligned based on
`missing_index` and `missing_columns` respectively (see `data` in `vectorbt._settings.settings`):
```python-repl
>>> start_dt = vbt.symbol_dict({'RANDNX2': '2021-01-03'})
>>> end_dt = vbt.symbol_dict({'RANDNX2': '2021-01-07'})
>>> rand_data = RandomData.download(
... ['RANDNX1', 'RANDNX2'], start_value=start_value,
... start_dt=start_dt, end_dt=end_dt)
>>> rand_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 101.028054 NaN
2021-01-02 101.032090 NaN
2021-01-03 101.038531 200.936283
2021-01-04 101.068265 200.926764
2021-01-05 100.878492 200.898898
2021-01-06 100.857444 200.922368
2021-01-07 100.933123 200.987094
2021-01-08 100.938034 NaN
2021-01-09 101.044736 NaN
2021-01-10 101.098133 NaN
```
## Updating
Updating can be implemented by overriding the `Data.update_symbol` instance method, which takes
the same arguments as `Data.download_symbol`. In contrast to the download method, the update
method is an instance method and can access the data downloaded earlier. It can also access the
keyword arguments initially passed to the download method, accessible under `Data.download_kwargs`.
Those arguments can be used as default arguments and overriden by arguments passed directly
to the update method, using `vectorbt.utils.config.merge_dicts`.
Let's define an update method that updates the latest data point and adds two news data points.
Note that updating data always returns a new `Data` instance.
```python-repl
>>> from datetime import timedelta
>>> from vectorbt.utils.config import merge_dicts
>>> class RandomData(vbt.Data):
... @classmethod
... def download_symbol(cls, symbol, mean=0., stdev=0.1, start_value=100,
... start_dt='2021-01-01', end_dt='2021-01-10'):
... index = pd.date_range(start_dt, end_dt)
... rand_returns = np.random.normal(mean, stdev, size=len(index))
... rand_price = start_value + np.cumprod(rand_returns + 1)
... return pd.Series(rand_price, index=index)
...
... def update_symbol(self, symbol, **kwargs):
... download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
... download_kwargs['start_dt'] = self.data[symbol].index[-1]
... download_kwargs['end_dt'] = download_kwargs['start_dt'] + timedelta(days=2)
... kwargs = merge_dicts(download_kwargs, kwargs)
... return self.download_symbol(symbol, **kwargs)
>>> rand_data = RandomData.download(['RANDNX1', 'RANDNX2'], end_dt='2021-01-05')
>>> rand_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 100.956601 100.970865
2021-01-02 100.919011 100.987026
2021-01-03 101.062733 100.835376
2021-01-04 100.960535 100.820817
2021-01-05 100.834387 100.866549
>>> rand_data = rand_data.update()
>>> rand_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 100.956601 100.970865
2021-01-02 100.919011 100.987026
2021-01-03 101.062733 100.835376
2021-01-04 100.960535 100.820817
2021-01-05 101.011255 100.887049 < updated from here
2021-01-06 101.004149 100.808410
2021-01-07 101.023673 100.714583
>>> rand_data = rand_data.update()
>>> rand_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 100.956601 100.970865
2021-01-02 100.919011 100.987026
2021-01-03 101.062733 100.835376
2021-01-04 100.960535 100.820817
2021-01-05 101.011255 100.887049
2021-01-06 101.004149 100.808410
2021-01-07 100.883400 100.874922 < updated from here
2021-01-08 101.011738 100.780188
2021-01-09 100.912639 100.934014
```
## Merging
You can merge symbols from different `Data` instances either by subclassing `Data` and
defining custom download and update methods, or by manually merging their data dicts
into one data dict and passing it to the `Data.from_data` class method.
```python-repl
>>> rand_data1 = RandomData.download('RANDNX1', mean=0.2)
>>> rand_data2 = RandomData.download('RANDNX2', start_value=200, start_dt='2021-01-05')
>>> merged_data = vbt.Data.from_data(vbt.merge_dicts(rand_data1.data, rand_data2.data))
>>> merged_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 101.160718 NaN
2021-01-02 101.421020 NaN
2021-01-03 101.959176 NaN
2021-01-04 102.076670 NaN
2021-01-05 102.447234 200.916198
2021-01-06 103.195187 201.033907
2021-01-07 103.595915 200.908229
2021-01-08 104.332550 201.000497
2021-01-09 105.159708 201.019157
2021-01-10 106.729495 200.910210
```
## Indexing
Like any other class subclassing `vectorbt.base.array_wrapper.Wrapping`, we can do pandas indexing
on a `Data` instance, which forwards indexing operation to each Series/DataFrame:
```python-repl
>>> rand_data.loc['2021-01-07':'2021-01-09']
<__main__.RandomData at 0x7fdba4e36198>
>>> rand_data.loc['2021-01-07':'2021-01-09'].get()
symbol RANDNX1 RANDNX2
2021-01-07 100.883400 100.874922
2021-01-08 101.011738 100.780188
2021-01-09 100.912639 100.934014
```
## Saving and loading
Like any other class subclassing `vectorbt.utils.config.Pickleable`, we can save a `Data`
instance to the disk with `Data.save` and load it with `Data.load`:
```python-repl
>>> rand_data.save('rand_data')
>>> rand_data = RandomData.load('rand_data')
>>> rand_data.get()
symbol RANDNX1 RANDNX2
2021-01-01 100.956601 100.970865
2021-01-02 100.919011 100.987026
2021-01-03 101.062733 100.835376
2021-01-04 100.960535 100.820817
2021-01-05 101.011255 100.887049
2021-01-06 101.004149 100.808410
2021-01-07 100.883400 100.874922
2021-01-08 101.011738 100.780188
2021-01-09 100.912639 100.934014
```
## Stats
!!! hint
See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `Data.metrics`.
```python-repl
>>> rand_data = RandomData.download(['RANDNX1', 'RANDNX2'])
>>> rand_data.stats(column='a')
Start 2021-01-01 00:00:00+00:00
End 2021-01-10 00:00:00+00:00
Period 10 days 00:00:00
Total Symbols 2
Null Counts: RANDNX1 0
Null Counts: RANDNX2 0
dtype: object
```
`Data.stats` also supports (re-)grouping:
```python-repl
>>> rand_data.stats(group_by=True)
Start 2021-01-01 00:00:00+00:00
End 2021-01-10 00:00:00+00:00
Period 10 days 00:00:00
Total Symbols 2
Null Counts: RANDNX1 0
Null Counts: RANDNX2 0
Name: group, dtype: object
```
## Plots
!!! hint
See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots` and `Data.subplots`.
`Data` class has a single subplot based on `Data.plot`:
```python-repl
>>> rand_data.plots(settings=dict(base=100)).show_svg()
```

"""
import warnings
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.generic import plotting
from vectorbt.generic.plots_builder import PlotsBuilderMixin
from vectorbt.generic.stats_builder import StatsBuilderMixin
from vectorbt.utils import checks
from vectorbt.utils.config import merge_dicts, Config
from vectorbt.utils.datetime_ import is_tz_aware, to_timezone
from vectorbt.utils.decorators import cached_method
__pdoc__ = {}
class symbol_dict(dict):
"""Dict that contains symbols as keys."""
pass
class MetaData(type(StatsBuilderMixin), type(PlotsBuilderMixin)):
pass
DataT = tp.TypeVar("DataT", bound="Data")
class Data(Wrapping, StatsBuilderMixin, PlotsBuilderMixin, metaclass=MetaData):
"""Class that downloads, updates, and manages data coming from a data source."""
def __init__(self,
wrapper: ArrayWrapper,
data: tp.Data,
tz_localize: tp.Optional[tp.TimezoneLike],
tz_convert: tp.Optional[tp.TimezoneLike],
missing_index: str,
missing_columns: str,
download_kwargs: dict,
**kwargs) -> None:
Wrapping.__init__(
self,
wrapper,
data=data,
tz_localize=tz_localize,
tz_convert=tz_convert,
missing_index=missing_index,
missing_columns=missing_columns,
download_kwargs=download_kwargs,
**kwargs
)
StatsBuilderMixin.__init__(self)
PlotsBuilderMixin.__init__(self)
checks.assert_instance_of(data, dict)
for k, v in data.items():
checks.assert_meta_equal(v, data[list(data.keys())[0]])
self._data = data
self._tz_localize = tz_localize
self._tz_convert = tz_convert
self._missing_index = missing_index
self._missing_columns = missing_columns
self._download_kwargs = download_kwargs
def indexing_func(self: DataT, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> DataT:
"""Perform indexing on `Data`."""
new_wrapper = pd_indexing_func(self.wrapper)
new_data = {k: pd_indexing_func(v) for k, v in self.data.items()}
return self.replace(
wrapper=new_wrapper,
data=new_data
)
@property
def data(self) -> tp.Data:
"""Data dictionary keyed by symbol."""
return self._data
@property
def symbols(self) -> tp.List[tp.Label]:
"""List of symbols."""
return list(self.data.keys())
@property
def tz_localize(self) -> tp.Optional[tp.TimezoneLike]:
"""`tz_localize` initially passed to `Data.download_symbol`."""
return self._tz_localize
@property
def tz_convert(self) -> tp.Optional[tp.TimezoneLike]:
"""`tz_convert` initially passed to `Data.download_symbol`."""
return self._tz_convert
@property
def missing_index(self) -> str:
"""`missing_index` initially passed to `Data.download_symbol`."""
return self._missing_index
@property
def missing_columns(self) -> str:
"""`missing_columns` initially passed to `Data.download_symbol`."""
return self._missing_columns
@property
def download_kwargs(self) -> dict:
"""Keyword arguments initially passed to `Data.download_symbol`."""
return self._download_kwargs
@classmethod
def align_index(cls, data: tp.Data, missing: str = 'nan') -> tp.Data:
"""Align data to have the same index.
The argument `missing` accepts the following values:
* 'nan': set missing data points to NaN
* 'drop': remove missing data points
* 'raise': raise an error"""
if len(data) == 1:
return data
index = None
for k, v in data.items():
if index is None:
index = v.index
else:
if len(index.intersection(v.index)) != len(index.union(v.index)):
if missing == 'nan':
warnings.warn("Symbols have mismatching index. "
"Setting missing data points to NaN.", stacklevel=2)
index = index.union(v.index)
elif missing == 'drop':
warnings.warn("Symbols have mismatching index. "
"Dropping missing data points.", stacklevel=2)
index = index.intersection(v.index)
elif missing == 'raise':
raise ValueError("Symbols have mismatching index")
else:
raise ValueError(f"missing='{missing}' is not recognized")
# reindex
new_data = {k: v.reindex(index=index) for k, v in data.items()}
return new_data
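
    # Hedged doctest-style illustration (added for clarity; not in the original source):
    # with missing='nan', mismatching indexes are unioned and the absent rows become NaN
    # after the reindex step, e.g.
    #   a = pd.Series([1.0, 2.0], index=[0, 1]); b = pd.Series([3.0], index=[1])
    #   Data.align_index({'A': a, 'B': b}, missing='nan')  # 'B' gains a NaN at index 0
    # while missing='drop' would instead keep only index 1 for both symbols.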
@classmethod
def align_columns(cls, data: tp.Data, missing: str = 'raise') -> tp.Data:
"""Align data to have the same columns.
See `Data.align_index` for `missing`."""
if len(data) == 1:
return data
columns = None
multiple_columns = False
name_is_none = False
for k, v in data.items():
if isinstance(v, pd.Series):
if v.name is None:
name_is_none = True
v = v.to_frame()
else:
multiple_columns = True
if columns is None:
columns = v.columns
else:
if len(columns.intersection(v.columns)) != len(columns.union(v.columns)):
if missing == 'nan':
warnings.warn("Symbols have mismatching columns. "
"Setting missing data points to NaN.", stacklevel=2)
columns = columns.union(v.columns)
elif missing == 'drop':
warnings.warn("Symbols have mismatching columns. "
"Dropping missing data points.", stacklevel=2)
columns = columns.intersection(v.columns)
elif missing == 'raise':
raise ValueError("Symbols have mismatching columns")
else:
raise ValueError(f"missing='{missing}' is not recognized")
# reindex
new_data = {}
for k, v in data.items():
if isinstance(v, pd.Series):
v = v.to_frame(name=v.name)
v = v.reindex(columns=columns)
if not multiple_columns:
v = v[columns[0]]
if name_is_none:
v = v.rename(None)
new_data[k] = v
return new_data
@classmethod
def select_symbol_kwargs(cls, symbol: tp.Label, kwargs: dict) -> dict:
"""Select keyword arguments belonging to `symbol`."""
_kwargs = dict()
for k, v in kwargs.items():
if isinstance(v, symbol_dict):
if symbol in v:
_kwargs[k] = v[symbol]
else:
_kwargs[k] = v
return _kwargs
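
    # Hedged illustration (added for clarity; not in the original source): values wrapped in
    # `symbol_dict` are resolved per symbol, everything else passes through unchanged, e.g.
    #   Data.select_symbol_kwargs('BTC-USD', dict(period='1d', start=symbol_dict({'BTC-USD': '2020-01-01'})))
    #   # -> {'period': '1d', 'start': '2020-01-01'}
    # A symbol missing from a symbol_dict simply omits that keyword for that symbol.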
@classmethod
def from_data(cls: tp.Type[DataT],
data: tp.Data,
tz_localize: tp.Optional[tp.TimezoneLike] = None,
tz_convert: tp.Optional[tp.TimezoneLike] = None,
missing_index: tp.Optional[str] = None,
missing_columns: tp.Optional[str] = None,
wrapper_kwargs: tp.KwargsLike = None,
**kwargs) -> DataT:
"""Create a new `Data` instance from (aligned) data.
Args:
data (dict): Dictionary of array-like objects keyed by symbol.
tz_localize (timezone_like): If the index is tz-naive, convert to a timezone.
See `vectorbt.utils.datetime_.to_timezone`.
tz_convert (timezone_like): Convert the index from one timezone to another.
See `vectorbt.utils.datetime_.to_timezone`.
missing_index (str): See `Data.align_index`.
missing_columns (str): See `Data.align_columns`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `data` in `vectorbt._settings.settings`."""
from vectorbt._settings import settings
data_cfg = settings['data']
# Get global defaults
if tz_localize is None:
tz_localize = data_cfg['tz_localize']
if tz_convert is None:
tz_convert = data_cfg['tz_convert']
if missing_index is None:
missing_index = data_cfg['missing_index']
if missing_columns is None:
missing_columns = data_cfg['missing_columns']
if wrapper_kwargs is None:
wrapper_kwargs = {}
data = data.copy()
for k, v in data.items():
# Convert array to pandas
if not isinstance(v, (pd.Series, pd.DataFrame)):
v = np.asarray(v)
if v.ndim == 1:
v = pd.Series(v)
else:
v = pd.DataFrame(v)
# Perform operations with datetime-like index
if isinstance(v.index, pd.DatetimeIndex):
if tz_localize is not None:
if not is_tz_aware(v.index):
v = v.tz_localize(to_timezone(tz_localize))
if tz_convert is not None:
v = v.tz_convert(to_timezone(tz_convert))
v.index.freq = v.index.inferred_freq
data[k] = v
# Align index and columns
data = cls.align_index(data, missing=missing_index)
data = cls.align_columns(data, missing=missing_columns)
# Create new instance
symbols = list(data.keys())
wrapper = ArrayWrapper.from_obj(data[symbols[0]], **wrapper_kwargs)
return cls(
wrapper,
data,
tz_localize=tz_localize,
tz_convert=tz_convert,
missing_index=missing_index,
missing_columns=missing_columns,
**kwargs
)
@classmethod
def download_symbol(cls, symbol: tp.Label, **kwargs) -> tp.SeriesFrame:
"""Abstract method to download a symbol."""
raise NotImplementedError
@classmethod
def download(cls: tp.Type[DataT],
symbols: tp.Union[tp.Label, tp.Labels],
tz_localize: tp.Optional[tp.TimezoneLike] = None,
tz_convert: tp.Optional[tp.TimezoneLike] = None,
missing_index: tp.Optional[str] = None,
missing_columns: tp.Optional[str] = None,
wrapper_kwargs: tp.KwargsLike = None,
**kwargs) -> DataT:
"""Download data using `Data.download_symbol`.
Args:
symbols (hashable or sequence of hashable): One or multiple symbols.
!!! note
Tuple is considered as a single symbol (since hashable).
tz_localize (any): See `Data.from_data`.
tz_convert (any): See `Data.from_data`.
missing_index (str): See `Data.from_data`.
missing_columns (str): See `Data.from_data`.
wrapper_kwargs (dict): See `Data.from_data`.
**kwargs: Passed to `Data.download_symbol`.
If two symbols require different keyword arguments, pass `symbol_dict` for each argument.
"""
if checks.is_hashable(symbols):
symbols = [symbols]
elif not checks.is_sequence(symbols):
raise TypeError("Symbols must be either hashable or sequence of hashable")
data = dict()
for s in symbols:
# Select keyword arguments for this symbol
_kwargs = cls.select_symbol_kwargs(s, kwargs)
# Download data for this symbol
data[s] = cls.download_symbol(s, **_kwargs)
# Create new instance from data
return cls.from_data(
data,
tz_localize=tz_localize,
tz_convert=tz_convert,
missing_index=missing_index,
missing_columns=missing_columns,
wrapper_kwargs=wrapper_kwargs,
download_kwargs=kwargs
)
def update_symbol(self, symbol: tp.Label, **kwargs) -> tp.SeriesFrame:
"""Abstract method to update a symbol."""
raise NotImplementedError
def update(self: DataT, **kwargs) -> DataT:
"""Update the data using `Data.update_symbol`.
Args:
**kwargs: Passed to `Data.update_symbol`.
If two symbols require different keyword arguments, pass `symbol_dict` for each argument.
!!! note
Returns a new `Data` instance."""
new_data = dict()
for k, v in self.data.items():
# Select keyword arguments for this symbol
_kwargs = self.select_symbol_kwargs(k, kwargs)
# Download new data for this symbol
new_obj = self.update_symbol(k, **_kwargs)
# Convert array to pandas
if not isinstance(new_obj, (pd.Series, pd.DataFrame)):
new_obj = np.asarray(new_obj)
index = pd.RangeIndex(
start=v.index[-1],
stop=v.index[-1] + new_obj.shape[0],
step=1
)
if new_obj.ndim == 1:
new_obj = pd.Series(new_obj, index=index)
else:
new_obj = pd.DataFrame(new_obj, index=index)
# Perform operations with datetime-like index
if isinstance(new_obj.index, pd.DatetimeIndex):
if self.tz_localize is not None:
if not is_tz_aware(new_obj.index):
new_obj = new_obj.tz_localize(to_timezone(self.tz_localize))
if self.tz_convert is not None:
new_obj = new_obj.tz_convert(to_timezone(self.tz_convert))
new_data[k] = new_obj
# Align index and columns
new_data = self.align_index(new_data, missing=self.missing_index)
new_data = self.align_columns(new_data, missing=self.missing_columns)
# Concatenate old and new data
for k, v in new_data.items():
if isinstance(self.data[k], pd.Series):
if isinstance(v, pd.DataFrame):
v = v[self.data[k].name]
else:
v = v[self.data[k].columns]
v = pd.concat((self.data[k], v), axis=0)
v = v[~v.index.duplicated(keep='last')]
if isinstance(v.index, pd.DatetimeIndex):
v.index.freq = v.index.inferred_freq
new_data[k] = v
# Create new instance
new_index = new_data[self.symbols[0]].index
return self.replace(
wrapper=self.wrapper.replace(index=new_index),
data=new_data
)
@cached_method
def concat(self, level_name: str = 'symbol') -> tp.Data:
"""Return a dict of Series/DataFrames with symbols as columns, keyed by column name."""
first_data = self.data[self.symbols[0]]
index = first_data.index
if isinstance(first_data, pd.Series):
columns = | pd.Index([first_data.name]) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 17:14:29 2020
@author: p000526841
"""
from pathlib import Path
import numpy as np
import pandas as pd
from datetime import datetime
import inspect
#from matplotlib_venn import venn2
from utils import *
plt.rcParams['font.family'] = 'IPAexGothic'
@contextmanager
def save_fig(path_to_save=PATH_TO_GRAPH_DIR/f"tmp.png"):
plt.figure()
yield
plt.savefig(path_to_save)
def showCorr(df, str_value_name, show_percentage=0.6):
corrmat = df.corr()
num_of_col = len(corrmat.columns)
cols = corrmat.nlargest(num_of_col, str_value_name)[str_value_name]
tmp = cols[(cols >= show_percentage) | (cols <= -show_percentage)]
print("*****[ corr : " + str_value_name + " ]*****")
print(tmp)
print("*****[" + str_value_name + "]*****")
print("\n")
#print(tmp[0])
def showBoxPlot(df, str_val1, str_va2):
plt.figure(figsize=(15, 8))
plt.xticks(rotation=90, size='small')
#neigh_median = df.groupby([str_val1],as_index=False)[str_va2].median().sort_values(str_va2)
#print(neigh_median)
#col_order = neigh_median[str_val1].values
#sns.boxplot(x=df[str_val1], y =df[str_va2], order=col_order)
sns.boxplot(x=df[str_val1], y =df[str_va2])
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_box_plot_{}.png".format(str_val1))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
def createVenn(train_set, test_set, title_str, path_to_save, train_label="train", test_label="test"):
plt.figure()
#venn2(subsets=[train_set,test_set],set_labels=(train_label,test_label))
plt.title(f'{title_str}',fontsize=20)
#path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_venn_{}.png".format(title_str))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
def showValueCount(df_train, df_test, str_value, str_target, debug=True, regression_flag=1, _fig_size=(20,10)):
if str_value == str_target:
df_test[str_value] = np.nan
df = pd.concat([df_train, df_test])
if not str_value in df.columns:
print(str_value, " is not inside columns")
return
se_all = df[str_value]
se_train = df_train[str_value]
se_test = df_test[str_value]
all_desc = se_all.describe()
train_desc = se_train.describe()
test_desc = se_test.describe()
df_concat_desc = pd.concat([train_desc, test_desc, all_desc], axis=1, keys=['train', 'test', "all"])
if debug:
print("***[" + str_value + "]***")
print("describe :")
print(df_concat_desc)
num_nan_all = se_all.isna().sum()
num_nan_train = se_train.isna().sum()
num_nan_test = se_test.isna().sum()
df_concat_num_nan = pd.DataFrame([num_nan_train, num_nan_test, num_nan_all], columns=["num_of_nan"], index=['train', 'test', "all"]).transpose()
if debug:
print("Num of Nan : ")
print(df_concat_num_nan)
df_value = se_all.value_counts(dropna=False)
df_value_percentage = (df_value / df_value.sum()) * 100
df_value_train = se_train.value_counts(dropna=False)
df_value_train_percentage = (df_value_train / df_value_train.sum()) * 100
df_value_test = se_test.value_counts(dropna=False)
df_value_test_percentage = (df_value_test / df_value_test.sum()) * 100
df_concat = pd.concat([df_value_train, df_value_train_percentage, df_value_test, df_value_test_percentage, df_value, df_value_percentage], axis=1, keys=['train', "train rate", 'test', "test rate", "all", "all rate"], sort=True)
train_values = set(se_train.unique())
test_values = set(se_test.unique())
xor_values = test_values - train_values
if xor_values:
#print(f'Replace {len(xor_values)} in {col} column')
print(f'{xor_values} is only found in test, not train!!!')
#full_data.loc[full_data[col].isin(xor_values), col] = 'xor'
xor_values_train = train_values - test_values
if xor_values_train:
#print(f'Replace {len(xor_values)} in {col} column')
print(f'{xor_values_train} is only found in train, not test!!!' )
#full_data.loc[full_data[col].isin(xor_values), col] = 'xor'
if debug:
# plt.figure()
# venn2(subsets=[train_values,test_values],set_labels=('train','test'))
# plt.title(f'{str_value}',fontsize=20)
# path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_venn_{}.png".format(str_value))
# #print("save: ", path_to_save)
# plt.savefig(path_to_save)
# plt.show(block=False)
# plt.close()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_venn_{}.png".format(str_value))
createVenn(train_set=train_values, test_set=test_values, title_str=str_value, path_to_save=path_to_save, train_label="train", test_label="test")
print("value_counts :")
print(df_concat)
plt.figure(figsize=_fig_size)
df_graph = df_concat[['train', 'test', "all"]].reset_index()
df_graph = pd.melt(df_graph, id_vars=["index"], value_vars=['train', 'test', "all"])
sns.barplot(x='index', y='value', hue='variable', data=df_graph)
#sns.despine(fig)
#df_concat[['train', 'test', "all"]].dropna().plot.bar(figsize=_fig_size)
plt.ylabel('Number of each element', fontsize=12)
plt.xlabel(str_value, fontsize=12)
plt.xticks(rotation=90, size='small')
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_num_each_elments_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
plt.figure(figsize=_fig_size)
df_graph = df_concat[['train rate', 'test rate', "all rate"]].reset_index()
df_graph = pd.melt(df_graph, id_vars=["index"], value_vars=['train rate', 'test rate', "all rate"])
sns.barplot(x='index', y='value', hue='variable', data=df_graph)
#df_concat[['train rate', 'test rate', "all rate"]].plot.bar(figsize=_fig_size)
plt.ylabel('rate of each element', fontsize=12)
plt.xlabel(str_value, fontsize=12)
plt.xticks(rotation=90, size='small')
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_rate_each_elments_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
if str_value != str_target and str_target in df.columns:
if regression_flag == 1:
if debug:
showBoxPlot(df_train, str_value, str_target)
else:
df_train_small = df.loc[df[str_target].isnull() == False, [str_value, str_target]]
df_stack = df_train_small.groupby(str_value)[str_target].value_counts().unstack()
if debug:
print("---")
col_list = []
df_list = []
if debug:
plt.figure(figsize=_fig_size)
g = sns.countplot(x=str_value, hue = str_target, data=df, order=df_stack.index)
plt.xticks(rotation=90, size='small')
ax1 = g.axes
ax2 = ax1.twinx()
for col in df_stack.columns:
col_list += [str(col), str(col)+"_percent"]
df_percent = (df_stack.loc[:, col] / df_stack.sum(axis=1))
df_list += [df_stack.loc[:, col], df_percent]
if debug:
#print(df_percent.index)
xn = range(len(df_percent.index))
sns.lineplot(x=xn, y=df_percent.values, ax=ax2)
#sns.lineplot(data=df_percent, ax=ax2)
#sns.lineplot(data=df_percent, y=(str(col)+"_percent"), x=df_percent.index)
df_conc = pd.concat(df_list, axis=1, keys=col_list)
if debug:
print(df_conc.T)
#print(df_stack.columns)
#print(df_stack.index)
#plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_count_line_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
if debug:
print("******\n")
del df
gc.collect()
return df_concat
def showJointPlot(_df_train, _df_test, str_value, str_target, debug=True, regression_flag=1, corr_flag=False, empty_nums=[], log_flag=1, _fig_size=(20, 10)):
print("now in function : ", inspect.getframeinfo(inspect.currentframe())[2])
df_train = _df_train.copy()
df_test = _df_test.copy()
if str_value == str_target:
df_test[str_target] = np.nan
if len(empty_nums) >0:
for e in empty_nums:
df_train[str_value] = df_train[str_value].replace(e, np.nan)
df_test[str_value] = df_test[str_value].replace(e, np.nan)
if log_flag==1:
df_train[str_value] = np.log1p(df_train[str_value])
df_test[str_value] = np.log1p(df_test[str_value])
df = pd.concat([df_train, df_test])
if not str_value in df.columns:
print(str_value + " is not inside columns")
return
se_all = df[str_value]
se_train = df_train[str_value]
se_test = df_test[str_value]
all_desc = se_all.describe()
train_desc = se_train.describe()
test_desc = se_test.describe()
df_concat_desc = pd.concat([train_desc, test_desc, all_desc], axis=1, keys=['train', 'test', "all"])
print("***[" + str_value + "]***")
print("describe :")
print(df_concat_desc)
num_nan_all = se_all.isna().sum()
num_nan_train = se_train.isna().sum()
num_nan_test = se_test.isna().sum()
df_concat_num_nan = pd.DataFrame([num_nan_train, num_nan_test, num_nan_all], columns=["num_of_nan"], index=['train', 'test', "all"]).transpose()
print("Num of Nan : ")
print(df_concat_num_nan)
skew_all = se_all.skew()
skew_train = se_train.skew()
skew_test = se_test.skew()
df_concat_skew = pd.DataFrame([skew_train, skew_test, skew_all], columns=["skew"], index=['train', 'test', "all"]).transpose()
print("skew : ")
print(df_concat_skew)
if corr_flag==True:
showCorr(df, str_value)
#tmp_se = pd.Series( ["_"] * len(df_dist), columns=["dataset"] )
#print(tmp_se.values)
#df_dist.append(tmp_se)
#df_dist["dataset"].apply(lambda x: "train" if pd.isna(x[self.str_target_value_]) == False else "test")
#df_dist.plot(kind="kde", y=df_dist["dataset"])
plt.figure(figsize=_fig_size)
sns.distplot(df_train[str_value].dropna(),kde=True,label="train")
sns.distplot(df_test[str_value].dropna(),kde=True,label="test")
plt.title('distplot by {}'.format(str_value),size=20)
plt.xlabel(str_value)
plt.ylabel('prob')
plt.legend() #実行させないと凡例が出ない。
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_distplot_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
#sns.distplot(df_dist[str_value], hue=df_dist["dataset"])
#visualize_distribution(df[str_value].dropna())
#visualize_probplot(df[str_value].dropna())
# plt.figure(figsize=(10,5))
#
# sns.distplot()
# plt.show()
if (str_value != str_target) and (str_target in df.columns):
#plt.figure(figsize=(10,5))
if regression_flag == 1:
sns.jointplot(str_value, str_target, df_train)
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_jointplot_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
df_train.plot.hexbin(x=str_value, y=str_target, gridsize=15, sharex=False)
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_hexbin_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
#plt.show()
else:
df_small = df_train[[str_value, str_target]]
print(df_small.groupby(str_target)[str_value].describe().T)
type_val = df_small[str_target].unique()
#print(type_val)
plt.figure()
for i, val in enumerate(type_val):
sns.distplot(df_small.loc[df_small[str_target]==val, str_value].dropna(),kde=True,label=str(val)) #, color=mycols[i%len(mycols)])
plt.legend() #実行させないと凡例が出ない。
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_distplot_target_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
plt.figure(figsize=_fig_size)
plt.xlabel(str_value, fontsize=9)
for i, val in enumerate(type_val):
sns.kdeplot(df_small.loc[df_small[str_target] == val, str_value].dropna().values, bw=0.5,label='Target: {}'.format(val))
sns.kdeplot(df_test[str_value].dropna().values, bw=0.5,label='Test')
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_kde_target_{}.png".format(str_value))
print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
print("******\n")
del df, df_train, df_test
gc.collect()
return
def showDetails(_df_train, _df_test, new_cols, target_val, debug=True, regression_flag=1, corr_flag=True, obj_flag=False):
df_train = _df_train.copy()
if _df_test is None:
df_test = pd.DataFrame(index=[0], columns=new_cols)
else:
df_test = _df_test.copy()
for new_col in new_cols:
if obj_flag:
df_train[new_col] = df_train[new_col].astype("str")
df_test[new_col] = df_test[new_col].astype("str")
try:
if df_train[new_col].dtype == "object":
showValueCount(df_train, df_test, new_col, target_val, debug=debug, regression_flag=regression_flag)
else:
showJointPlot(df_train, df_test, new_col, target_val, debug=debug, regression_flag=regression_flag, corr_flag=corr_flag, empty_nums=[-999], log_flag=0)
except Exception as e:
print(e)
print("******[error col : {}]******".format(new_col))
def interEDA(df_train, df_test, inter_col, new_cols, target_val, _fig_size=(10, 5)):
df = pd.concat([df_train, df_test])
elements = df[inter_col].unique()
type_val = df[target_val].unique()
for col in new_cols:
plt.figure(figsize=_fig_size)
plt.title('interaction kde of {}'.format(inter_col),size=20)
plt.xlabel(col, fontsize=9)
for e in elements:
df_small = df_train.loc[df_train[inter_col] == e]
for i, val in enumerate(type_val):
sns.kdeplot(df_small.loc[df_small[target_val] == val, col].dropna().values, bw=0.5,label='Inter:{}, Target: {}'.format(e, val))
sns.kdeplot(df_test.loc[df_test[inter_col]==e, col].dropna().values, bw=0.5,label='Inter:{}, Test'.format(e))
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_inter_kde_{}_vs_{}.png".format(inter_col, col))
print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
def procEDA_(df_train, df_test):
#df_train = df_train[df_train["y"] < (90)]
new_col=["延床面積(㎡)"]#
#new_col=df_train.columns
showDetails(df_train, df_test, new_col, "y", corr_flag=False)
sys.exit()
#df["nom"]
#print(df_train.loc[df_train["auto__nom_8_count_agg_by_nom_7"] > 30000, "auto__nom_8_count_agg_by_nom_7"])
#showValueCount(df_train, df_test, "ord_5", "target", debug=True, regression_flag=0)
for i in range(6):
col_name = "ord_{}".format(i)
df_train[col_name] /= df_train[col_name].max() # for convergence
df_test[col_name] /= df_test[col_name].max()
new_name = "{}_sqr".format(col_name)
df_train[new_name] = 4*(df_train[col_name] - 0.5)**2
df_test[new_name] = 4*(df_test[col_name] - 0.5)**2
#
new_col=["ord_3", "ord_3_sqr"]#getColumnsFromParts(["ord_3"], df_train.columns)
#showDetails(df_train, df_test, new_col, "target", corr_flag=False)
for col in new_col:
showJointPlot(df_train, df_test, col, "target", debug=True, regression_flag=0, corr_flag=False, empty_nums=[-999], log_flag=1)
# new_cols=list(df_train.columns.values)
# new_cols.remove("bin_3")
# new_cols.remove("target")
# #new_cols=["null_all_count"]
# #new_cols = getColumnsFromParts(["bin_3"], df_train.columns)
# #showDetails(df_train, df_test, new_cols, "target")
# for col in new_cols:
# try:
# interEDA(df_train, df_test, col, ["bin_3"], "target")
# except Exception as e:
# print(e)
# print("******[inter error col : {}]******".format(col))
sys.exit(0)
# colums_parts=[]
# parts_cols = getColumnsFromParts(colums_parts, df_train.columns)
# new_cols = list(set(new_cols + parts_cols))
use_columns=list(df_test.columns)
bin_list = ["bin_{}".format(i) for i in range(5)]
ord_list = ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5", "ord_5_1", "ord_5_2"]
nom_list = ["nom_{}".format(i) for i in range(10)] #getColumnsFromParts(["nom_"] , use_columns)
#oh_nom_list = getColumnsFromParts(["OH_nom"] , use_columns)
time_list = ["day", "month"]
nan_pos_list = getColumnsFromParts(["_nan_pos"] , use_columns)
count_list = getColumnsFromParts(["_count"], use_columns)
#inter_list = getColumnsFromParts(["inter_"], use_columns)
additional_list = ["day_cos", "day_sin", "month_cos", "month_sin"]
embedding_features_list=time_list + ord_list + nom_list + bin_list
continuous_features_list = additional_list+count_list +nan_pos_list
final_cols = embedding_features_list+continuous_features_list
#adversarialValidation(df_train[final_cols], df_test[final_cols], drop_cols=[])
adv2(df_train[final_cols], df_test[final_cols], drop_cols=[])
return
def procSave():
df_train, df_test = loadRaw()
print("df_train_interm:{}".format(df_train.shape))
print("df_test_interm:{}".format(df_test.shape))
df_train["part"] = "train"
df_test["part"] = "test"
df = pd.concat([df_train, df_test])
syurui_list = list(df["種類"].unique())
for w in syurui_list:
df_csv = df.loc[(df["種類"]==w)]
df_csv.to_csv(PROC_DIR /"syurui_{}.csv".format(w), encoding='utf_8_sig')
def edaSeasonMatch(df_train, df_test):
print(df_train.groupby("Season")["total_match_team1"].mean())
print(df_test.groupby("Season")["total_match_team1"].mean())
sys.exit()
def compare_sample(df_train, df_test):
df_train_sample = | pd.read_csv(PROC_DIR/"df_train_sample.csv", index_col=0) | pandas.read_csv |
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
# Add Bayesian-and-novelty directory to the PYTHONPATH
import sys
import os
sys.path.append(os.path.realpath('../../..'))
# Autoreload changes in utils, etc.
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
from novelty.utils.metrics import plot_roc, plot_prc
from novelty.utils.metrics import get_summary_statistics
from novelty.utils.metrics import html_summary_table
# In[2]:
# Training settings
BATCH_SIZE = 128
EPOCHS = 100
LR = 0.1
MOMENTUM = 0.9
NO_CUDA = False
SEED = 1
CLASSES = 80
MODEL_PATH_ROOT = './weights/wrn-28-10-cifar80'
MODEL_PATH = MODEL_PATH_ROOT + '.pth'
# MNIST mean and stdevs of training data by channel
CHANNEL_MEANS = (129.38732832670212/255, 124.35894414782524/255, 113.09937313199043/255)
CHANNEL_STDS = (67.87980079650879/255, 65.10988622903824/255, 70.04801765084267/255)
# Plot ROC and PR curves
PLOT_CHARTS = False
# ## Training and Testing functions
# In[3]:
from novelty.utils import Progbar
def train(model, device, train_loader, optimizer, epoch):
progbar = Progbar(target=len(train_loader.dataset))
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
progbar.add(len(data), [("loss", loss.item())])
# In[4]:
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = F.log_softmax(model(data), dim=1)
# sum up batch loss
test_loss += F.nll_loss(output, target, size_average=False).item()
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_acc = 100. * correct / len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset), test_acc))
return test_loss, test_acc
# ## Load CIFAR80
# In[5]:
from novelty.utils import DATA_DIR
from src.wide_resnet import Wide_ResNet
torch.manual_seed(SEED)
use_cuda = not NO_CUDA and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Dataset transformation
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
# Load training and test sets
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(os.path.join(DATA_DIR, 'cifar80/train'), transform=transform),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(os.path.join(DATA_DIR, 'cifar80/test'), transform=transform),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
# ## Initialize model and optimizer
# In[6]:
# Create model instance
model = Wide_ResNet(28, 10, 0., CLASSES)
# Move model to available GPUs
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
print("Using", torch.cuda.device_count(), "GPUs")
model = model.to(device)
# Initialize optimizer
optimizer = optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(EPOCHS*0.5), int(EPOCHS*0.75)], gamma=0.1)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=EPOCHS)
# ## Optimization loop
# In[7]:
if os.path.exists(MODEL_PATH):
# load previously trained model:
model.load_state_dict(torch.load(MODEL_PATH))
else:
best_loss = float("inf")
# Training loop
for epoch in range(EPOCHS):
print("Epoch:", epoch)
scheduler.step()
# Print the learning rate
for param_group in optimizer.param_groups:
print('Learning rate:', param_group['lr'])
train(model, device, train_loader, optimizer, epoch)
loss, acc = test(model, device, test_loader)
# Checkpoint the model parameters
if loss < best_loss:
torch.save(model.state_dict(), "{}_epoch{}.pth".format(MODEL_PATH_ROOT, epoch))
best_loss = loss
# save the model
torch.save(model.state_dict(), MODEL_PATH)
# ## ODIN prediction functions
# In[8]:
from torch.autograd import Variable
def predict(model, data, device):
model.eval()
data = data.to(device)
outputs = model(data)
outputs = outputs - outputs.max(1)[0].unsqueeze(1) # For stability
return F.softmax(outputs, dim=1)
def predict_temp(model, data, device, temp=1000.):
model.eval()
data = data.to(device)
outputs = model(data)
outputs /= temp
outputs = outputs - outputs.max(1)[0].unsqueeze(1) # For stability
return F.softmax(outputs, dim=1)
def predict_novelty(model, data, device, temp=1000., noiseMagnitude=0.0012):
model.eval()
# Create a variable so we can get the gradients on the input
inputs = Variable(data.to(device), requires_grad=True)
# Get the predicted labels
outputs = model(inputs)
outputs = outputs / temp
outputs = F.log_softmax(outputs, dim=1)
# Calculate the perturbation to add to the input
maxIndexTemp = torch.argmax(outputs, dim=1)
labels = Variable(maxIndexTemp).to(device)
loss = F.nll_loss(outputs, labels)
loss.backward()
# Normalizing the gradient to binary in {0, 1}
gradient = torch.ge(inputs.grad.data, 0)
gradient = (gradient.float() - 0.5) * 2
# Normalize the gradient to the same space of image
for channel, (mean, std) in enumerate(zip(CHANNEL_MEANS, CHANNEL_STDS)):
gradient[0][channel] = (gradient[0][channel] - mean) / std
# Add small perturbations to image
# TODO, this is from the released code, but disagrees with paper I think
tempInputs = torch.add(inputs.data, -noiseMagnitude, gradient)
# Get new outputs after perturbations
outputs = model(Variable(tempInputs))
outputs = outputs / temp
outputs = outputs - outputs.max(1)[0].unsqueeze(1) # For stability
outputs = F.softmax(outputs, dim=1)
return outputs
# ## Evaluate method on outlier datasets
# In[9]:
def get_max_model_outputs(data_loader, device):
"""Get the max softmax output from the model in a Python array.
data_loader: object
A pytorch dataloader with the data you want to calculate values for.
device: object
The CUDA device handle.
"""
result = []
for data, target in data_loader:
# Using regular model
p = predict(model, data, device)
max_val, label = torch.max(p, dim=1)
# Convert torch tensors to python list
max_val = list(max_val.cpu().detach().numpy())
result += max_val
return result
def get_max_odin_outputs(data_loader, device, temp=1000., noiseMagnitude=0.0012):
"""Convenience function to get the max softmax values from the ODIN model in a Python array.
data_loader: object
A pytorch dataloader with the data you want to calculate values for.
device: object
The CUDA device handle.
temp: float, optional (default=1000.)
The temp the model should use to do temperature scaling on the softmax outputs.
noiseMagnitude: float, optional (default=0.0012)
The epsilon value used to scale the input images according to the ODIN paper.
"""
result = []
for data, target in data_loader:
# Using ODIN model
p = predict_novelty(model, data, device, temp=temp, noiseMagnitude=noiseMagnitude)
max_val, label = torch.max(p, dim=1)
# Convert torch tensors to python list
max_val = list(max_val.cpu().detach().numpy())
result += max_val
return result
# In[10]:
import pandas as pd
df = pd.DataFrame(columns=['auroc', 'aupr_in', 'aupr_out', 'fpr_at_95_tpr', 'detection_error'],
index=['imagenet_crop', 'imagenet_resize', 'lsun_crop', 'lsun_resize',
'isun_resize', 'gaussian', 'uniform', 'cifar20'])
df_odin = pd.DataFrame(columns=['auroc', 'aupr_in', 'aupr_out', 'fpr_at_95_tpr', 'detection_error'],
index=['imagenet_crop', 'imagenet_resize', 'lsun_crop', 'lsun_resize',
'isun_resize', 'gaussian', 'uniform', 'cifar20'])
# ### Process Inliers
# In[11]:
num_inliers = len(test_loader.dataset)
# Get predictions on in-distribution images
cifar_model_maximums = get_max_model_outputs(test_loader, device)
# ### Tiny Imagenet (Crop)
# In[12]:
directory = os.path.join(DATA_DIR, 'tiny-imagenet-200/test')
# Dataset transformation
transform_crop = transforms.Compose([
transforms.RandomCrop([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
imagenet_crop_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(directory, transform=transform_crop),
batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_imagenet_crop = len(imagenet_crop_loader.dataset.imgs)
# Get predictions on in-distribution images
imagenet_crop_model_maximums = get_max_model_outputs(imagenet_crop_loader, device)
temp = 1000
eps = 0.0026
cifar_odin_maximums = get_max_odin_outputs(test_loader, device, temp=temp, noiseMagnitude=eps)
imagenet_crop_odin_maximums = get_max_odin_outputs(imagenet_crop_loader, device, temp=temp, noiseMagnitude=eps)
# In[13]:
labels = [1] * num_inliers + [0] * num_imagenet_crop
predictions = cifar_model_maximums + imagenet_crop_model_maximums
predictions_odin = cifar_odin_maximums + imagenet_crop_odin_maximums
stats = get_summary_statistics(predictions, labels)
df.loc['imagenet_crop'] = | pd.Series(stats) | pandas.Series |
## Top line code for our Oil Equity Model
## We are using 4 test firms (ExxonMobil, Diamondback Resources, Devon Energy and ConocoPhillps)
# packages needed
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
import numpy as np
import pandas as pd
import os
import json
import requests
import matplotlib.pyplot as plt
import openpyxl
# Import Tickers
tickerList = | pd.read_csv(
r"C:\Users\MichaelTanner\Documents\code_doc\oilstock\tickersOilModel.csv"
) | pandas.read_csv |
import pandas as pd
import numpy as np
from catboost import CatBoostClassifier
# 读取数据
def get_processed_data():
dataset1 = | pd.read_csv('myspace/ProcessDataSet1.csv') | pandas.read_csv |
from typing import List
import warnings
import pandas as pd
from epippy.geographics import match_points_to_regions
from epippy.technologies import get_config_values
from epippy import data_path
def get_legacy_capacity_in_countries(tech: str, countries: List[str], raise_error: bool = True) -> pd.Series:
"""
Return the total existing capacity (in GW) for the given tech for a set of countries.
If there is not data for a certain country, returns a capacity of 0.
Parameters
----------
tech: str
Name of technology for which we want to retrieve legacy data.
countries: List[str]
List of ISO codes of countries
raise_error: bool (default: True)
Whether to raise an error if no legacy data is available for this technology.
Returns
-------
capacities: pd.Series
Legacy capacities (in GW) of technology 'tech' for each country.
"""
assert len(countries) != 0, "Error: List of countries is empty."
# Read per grid cell capacity file
legacy_dir = f"{data_path}/generation/vres/legacy/generated/"
capacities_df = pd.read_csv(f"{legacy_dir}aggregated_capacity.csv", index_col=[0, 1])
plant, plant_type = get_config_values(tech, ["plant", "type"])
available_plant_types = set(capacities_df.index)
if (plant, plant_type) not in available_plant_types:
if raise_error:
raise ValueError(f"Error: no legacy data exists for tech {tech} with plant {plant} and type {plant_type}.")
else:
warnings.warn(f"Warning: No legacy data exists for tech {tech}.")
return pd.Series(0., name="Legacy capacity (GW)", index=countries, dtype=float)
# Get only capacity for the desired technology and aggregated per country
capacities_df = capacities_df.loc[(plant, plant_type), ("ISO2", "Capacity (GW)")]
capacities_ds = capacities_df.groupby("ISO2").sum().squeeze()
capacities_ds = capacities_ds.reindex(countries).fillna(0.)
capacities_ds.name = "Legacy capacity (GW)"
return capacities_ds
def get_legacy_capacity_at_points(tech: str, points: List[tuple], raise_error: bool = True) -> pd.Series:
"""
Return the total existing capacity (in GW) for the given tech for a set of countries.
If there is not data for a certain country, returns a capacity of 0.
Parameters
----------
tech: str
Name of technology for which we want to retrieve legacy data.
points: List[tuple]
List of points at which legacy capacity is retrieved.
raise_error: bool (default: True)
Whether to raise an error if no legacy data is available for this technology.
Returns
-------
capacities: pd.Series
Legacy capacities (in GW) of technology 'tech' for each country.
"""
assert len(points) != 0, "Error: List of points is empty."
# Read per grid cell capacity file
legacy_dir = f"{data_path}/generation/vres/legacy/generated/"
capacities_df = pd.read_csv(f"{legacy_dir}aggregated_capacity.csv", index_col=[0, 1])
plant, plant_type = get_config_values(tech, ["plant", "type"])
available_plant_types = set(capacities_df.index)
if (plant, plant_type) not in available_plant_types:
if raise_error:
raise ValueError(f"Error: no legacy data exists for tech {tech} with plant {plant} and type {plant_type}.")
else:
warnings.warn(f"Warning: No legacy data exists for tech {tech}.")
return | pd.Series(0., index=points, dtype=float) | pandas.Series |
import argparse
import time
import selenium
import datetime
import os, json
import pandas as pd
import swa
import configuration
DEFAULT_CONFIGURATION_FILE = "swatcher.ini"
class State(object):
def __init__(self):
self.errorCount = 0
self.currentLowestFare = None
self.blockQuery = False
self.notificationHistory = ''
self.dailyAlertDate = datetime.datetime.now().date()
class swatcher(object):
def __init__(self):
self.states = []
self.config = None
def now(self):
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def parseArguments(self):
parser = argparse.ArgumentParser(description = "swatcher.py: Utility to monitor SWA for fare price changes")
parser.add_argument('-f', '--file',
dest = 'configurationFile',
help = "Configuration file to use. If unspecified, will be '" + DEFAULT_CONFIGURATION_FILE + "'",
default = DEFAULT_CONFIGURATION_FILE)
args = parser.parse_args()
return args
def initializeLogs(self, index):
tripHistory = os.linesep + "Trip Details:"
ignoreKeys = ['index', 'description']
for key in self.config.trips[index].__dict__:
if any(x in key for x in ignoreKeys):
continue
tripHistory += os.linesep + " " + key + ": " + str(self.config.trips[index].__dict__[key])
if self.config.historyFileBase:
try:
historyFileName = self.config.historyFileBase + "-" + str(index) + ".history"
with open(historyFileName) as historyFile:
for line in historyFile:
tripHistory = line + tripHistory
except IOError as e:
pass
return tripHistory
def appendLogFile(self, index, message):
if self.config.historyFileBase:
try:
historyFileName = self.config.historyFileBase + "-" + str(index) + ".history"
with open(historyFileName, 'a') as historyFile:
historyFile.write(message + os.linesep)
except IOError as e:
pass
def initializeCsvHistory(self, trip):
trip_name = trip.description.split('/')[-1]
file_path = os.path.join(self.config.tripsDir, f"{trip_name}.csv")
if os.path.exists(file_path):
# File exists so it's already initialized
return
os.makedirs(self.config.tripsDir, exist_ok=True)
columns = ['query_datetime', 'returnOrDepart', 'flight', 'departTime', 'arriveTime', 'duration', 'stops', 'fare', 'fareAnytime', 'fareBusinessSelect']
| pd.DataFrame(columns=columns) | pandas.DataFrame |
import subprocess
from io import StringIO
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
from shutil import which
import argparse
def bedtools_window(bed1, bed2, window, reverse=False):
"""
Python wrapper for bedtools window.
reverse: return only sites that have no match in the ground truth.
"""
# make sure bedtools can be called in current env
assert which('bedtools') is not None, "bedtools not installed or not in PATH"
# run bedtools window, capture output
if not reverse:
out = subprocess.run(['bedtools', 'window', '-sm',
'-w', str(window),
'-a', bed1,
'-b', bed2],
capture_output=True, shell=False)
else:
out = subprocess.run(['bedtools', 'window', '-sm', '-v',
'-w', str(window),
'-a', bed1,
'-b', bed2],
capture_output=True, shell=False)
assert out.returncode==0, "bedtools window run failed, check input files"
# memory file-handle to pass output to pandas without writing to disk
out_handle = StringIO(out.stdout.decode())
# incase there were no sites returned (no overlap / all overlap in case of reverse=True)
if not out.stdout.decode():
out = pd.DataFrame()
else:
out = | pd.read_csv(out_handle, delimiter='\t', header=None, dtype={0: str}) | pandas.read_csv |
from __future__ import print_function, division
import math
import numpy as np
import torch
import pandas as pd
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
from typing import Optional, Dict
from bindsnet.encoding import Encoder, NullEncoder
from sklearn.preprocessing import MinMaxScaler
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
class StockDatasetBindsnet(Dataset):
__doc__ = (
"""BindsNET stock price dataset wrapper:
The stock of __getitem__ is a dictionary containing the price,
label (increasing, decreasing, not changing),
and their encoded versions if encoders were provided.
\n\n"""
)
def __init__(self,
csv_file,
price_encoder: Optional[Encoder] = None,
label_encoder: Optional[Encoder] = None,
train=False):
"""
Args:
csv_file (string): Path to the csv file with annotations.
"""
# language=rst
"""
Constructor for the BindsNET dataset wrapper.
For details on the dataset you're interested in visit
:param csv_file (string): Path to the csv file with annotations.
:param price_encoder: Spike encoder for use on the price
:param label_encoder: Spike encoder for use on the label
:param train: train
"""
self.df = pd.read_csv(csv_file)
self.train = train
self.get_technical_indicators()
# Creating a new dataframe with only the 'Close' column
data = self.df.filter(['close'])
# Converting the dataframe to a numpy array
dataset = data.values
# Get /Compute the number of rows to train the model on
training_data_len = math.ceil(len(dataset) * .8)
# here we are Scaling the all of the data to be values between 0 and 1
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
# Creating the scaled training data set
train_data = scaled_data[0:training_data_len, :]
# Spliting the data into x_train and y_train data sets
x_train = []
y_train = []
dim = 32
self.window_size = dim * dim
for i in range(self.window_size, len(train_data)):
x_train.append(train_data[i - self.window_size:i, 0])
y_train.append(train_data[i, 0])
# Here we are Converting x_train and y_train to numpy arrays
self.x_train, self.y_train = np.array(x_train), np.array(y_train)
# Creating the scaled training data set
train_data = scaled_data[0:training_data_len, :]
# Spliting the data into x_train and y_train data sets
x_train = []
y_train = []
for i in range(self.window_size, len(train_data)):
x_train.append(train_data[i - self.window_size:i, 0])
y_train.append(train_data[i, 0])
# Here we are Converting x_train and y_train to numpy arrays
self.x_train, self.y_train = np.array(x_train), np.array(y_train)
# here we are testing data set
test_data = scaled_data[training_data_len - self.window_size:, :]
# Creating the x_test and y_test data sets
x_test = []
y_test = dataset[training_data_len:, :]
# Get all of the rows from index 1603 to the rest and all of the columns (in this case it's only column 'Close'), so 2003 - 1603 = 400 rows of data
for i in range(dim*dim, len(test_data)):
x_test.append(test_data[i - self.window_size:i, 0])
# here we are converting x_test to a numpy array
self.x_test = np.array(x_test)
self.y_test = np.array(y_test)
# Allow the passthrough of None, but change to NullEncoder
if price_encoder is None:
price_encoder = NullEncoder()
if label_encoder is None:
label_encoder = NullEncoder()
self.price_encoder = price_encoder
self.label_encoder = label_encoder
def __len__(self):
# return len(self.df) - self.window_size
if self.train:
return len(self.x_train) - self.window_size
return len(self.x_test) - 1
def __getitem__(self, idx) -> Dict[str, torch.Tensor]:
if torch.is_tensor(idx):
idx = idx.tolist()
item = self.df.iloc[idx]
close = item['close']
ma7 = item['ma7']
ma21 = item['ma21']
# MACD = item['MACD']
upper_band = item['upper_band']
lower_band = item['lower_band']
ema = item['ema']
# price = {'close': close,
# 'ma7': ma7,
# 'ma21': ma21,
# 'MACD': MACD,
# '20sd': m20sd,
# 'upper_band': upper_band,
# 'lower_band': lower_band,
# 'ema': ema,
# }
# price = torch.FloatTensor([close, ma7, ma21, MACD, m20sd, upper_band, lower_band, ema])
# price = torch.FloatTensor([close, ma7, ma21, ema])
# price = torch.FloatTensor([close])
# label = torch.FloatTensor([0])
# if idx > 0: # Not changing
# if close > self.df.iloc[idx - 1]['close']:
# label = torch.FloatTensor([1]) # Increasing
# elif close < self.df.iloc[idx - 1]['close']:
# label = torch.FloatTensor([2]) # Decreasing
# output = {
# "price": price,
# "label": label,
# "encoded_price": self.price_encoder(price),
# "encoded_label": self.label_encoder(label),
# }
if self.train:
x = torch.FloatTensor(self.x_train[idx])
y = torch.as_tensor(self.y_train[idx], dtype=torch.float)
x_encoded = self.price_encoder(x)
y_encoded = self.label_encoder(y)
output = {
"price": x,
"label": y,
"encoded_price": x_encoded,
"encoded_label": y_encoded,
}
else:
x = torch.FloatTensor(self.x_test[idx])
y = torch.as_tensor(self.y_test[idx], dtype=torch.float)
x_encoded = self.price_encoder(x)
y_encoded = self.label_encoder(y)
output = {
"price": x,
"label": y,
"encoded_price": x_encoded,
"encoded_label": y_encoded,
}
return output
def get_technical_indicators(self):
# Create 7 and 21 days Moving Average
self.df['ma7'] = self.df['close'].rolling(window=7).mean().fillna(0)
self.df['ma21'] = self.df['close'].rolling(window=21).mean().fillna(0)
self.df['ma50'] = self.df['close'].rolling(window=50).mean().fillna(0)
self.df['Daily Return'] = self.df['close'].pct_change()
s = | pd.Series(self.df['close']) | pandas.Series |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from sklearn.cluster import KMeans, DBSCAN
from pypospack.pyposmat.data import datafile
def make_clusters(pca_df, cluster_by):
if cluster_by == 'kmeans':
obj_kmeans = KMeans(n_clusters=10)
obj_kmeans = obj_kmeans.fit(pca_df)
labels = obj_kmeans.labels_
elif cluster_by == 'dbscan':
obj_dbscan = DBSCAN(eps=0.75, min_samples=10).fit(pca_df)
labels = obj_dbscan.labels_
else:
labels = None
print("unsupported clustering method")
exit()
return labels
def plot_3d(df):
_clusterids = set(df['cluster_id'])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for clusterid in _clusterids:
x = df.loc[df['cluster_id'] == clusterid]['inv_0']
y = df.loc[df['cluster_id'] == clusterid]['inv_1']
z = df.loc[df['cluster_id'] == clusterid]['inv_2']
print(x.shape, y.shape, z.shape)
ax.scatter(x, y, z, s=1)
plt.title(s='Inverted Space Cluster Projection')
plt.show()
def compare_inversion(df):
# start with raw data df and do transform and inversion in here
# write raw df to csv
df.to_csv(path_or_buf='raw_data.csv', sep=',')
orig_np, orig_norms = normalize(df, return_norm=True)
nrows, ncols = orig_np.shape
orig_names = ['orig_{}'.format(i) for i in range(ncols)]
orig_df = pd.DataFrame(data=orig_np, columns=orig_names)
obj_pca = PCA()
pca_np = obj_pca.fit_transform(orig_df)
pca_names = ['pca_{}'.format(i) for i in range(ncols)]
pca_df = | pd.DataFrame(data=pca_np, columns=pca_names) | pandas.DataFrame |
################################################################################
# Logging logic, must come first
from tools.logger import configure_logging
import logging
configure_logging(
screen=False, file=True, screen_level=logging.DEBUG, file_level=logging.WARNING
)
################################################################################
from typing import Any, Dict
import pandas as pd
from models import bradley_terry, page_rank, seed
from pull_scripts import pull_round_1
from shared_types import PlayoffGame
from tools import file_lib, game_lib
year = 2021
data = list()
for game in pull_round_1.read_playoffs(year):
data.append(game_lib.row_from_game(game))
data.append(game_lib.row_from_game(game.flip()))
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
Common NLP tasks such as named_entities, noun_chunks, etc.
"""
import spacy
import pandas as pd
def named_entities(s, package="spacy"):
"""
Return named-entities.
Return a Pandas Series where each rows contains a list of tuples containing information regarding the given named entities.
Tuple: (`entity'name`, `entity'label`, `starting character`, `ending character`)
Under the hood, `named_entities` make use of Spacy name entity recognition.
List of labels:
- `PERSON`: People, including fictional.
- `NORP`: Nationalities or religious or political groups.
- `FAC`: Buildings, airports, highways, bridges, etc.
- `ORG` : Companies, agencies, institutions, etc.
- `GPE`: Countries, cities, states.
- `LOC`: Non-GPE locations, mountain ranges, bodies of water.
- `PRODUCT`: Objects, vehicles, foods, etc. (Not services.)
- `EVENT`: Named hurricanes, battles, wars, sports events, etc.
- `WORK_OF_ART`: Titles of books, songs, etc.
- `LAW`: Named documents made into laws.
- `LANGUAGE`: Any named language.
- `DATE`: Absolute or relative dates or periods.
- `TIME`: Times smaller than a day.
- `PERCENT`: Percentage, including ”%“.
- `MONEY`: Monetary values, including unit.
- `QUANTITY`: Measurements, as of weight or distance.
- `ORDINAL`: “first”, “second”, etc.
- `CARDINAL`: Numerals that do not fall under another type.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series("Yesterday I was in NY with <NAME>")
>>> hero.named_entities(s)[0]
[('Yesterday', 'DATE', 0, 9), ('NY', 'GPE', 19, 21), ('<NAME>', 'PERSON', 27, 41)]
"""
entities = []
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"])
# nlp.pipe is now 'ner'
for doc in nlp.pipe(s.astype("unicode").values, batch_size=32):
entities.append(
[(ent.text, ent.label_, ent.start_char, ent.end_char) for ent in doc.ents]
)
return pd.Series(entities, index=s.index)
def noun_chunks(s):
"""
Return noun chunks (noun phrases).
Return a Pandas Series where each row contains a tuple that has information regarding the noun chunk.
Tuple: (`chunk'text`, `chunk'label`, `starting index`, `ending index`)
Noun chunks or noun phrases are phrases that have noun at their head or nucleus
i.e., they ontain the noun and other words that describe that noun.
A detailed explanation on noun chunks: https://en.wikipedia.org/wiki/Noun_phrase
Internally `noun_chunks` makes use of Spacy's dependency parsing:
https://spacy.io/usage/linguistic-features#dependency-parse
Parameters
----------
input : Pandas Series
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series("The spotted puppy is sleeping.")
>>> hero.noun_chunks(s)
0 [(The spotted puppy, NP, 0, 17)]
dtype: object
"""
noun_chunks = []
nlp = spacy.load("en_core_web_sm", disable=["ner"])
# nlp.pipe is now "tagger", "parser"
for doc in nlp.pipe(s.astype("unicode").values, batch_size=32):
noun_chunks.append(
[
(chunk.text, chunk.label_, chunk.start_char, chunk.end_char)
for chunk in doc.noun_chunks
]
)
return pd.Series(noun_chunks, index=s.index)
def count_sentences(s: pd.Series) -> pd.Series:
"""
Count the number of sentences per cell in a Pandas Series.
Return a new Pandas Series with the number of sentences per cell.
This makes use of the SpaCy `sentencizer <https://spacy.io/api/sentencizer>`.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Yesterday I was in NY with <NAME>. Great story...", "This is the F.B.I.! What? Open up!"])
>>> hero.count_sentences(s)
0 2
1 3
dtype: int64
"""
number_of_sentences = []
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner"])
nlp.add_pipe(nlp.create_pipe("sentencizer")) # Pipe is only "sentencizer"
for doc in nlp.pipe(s.values, batch_size=32):
sentences = len(list(doc.sents))
number_of_sentences.append(sentences)
return | pd.Series(number_of_sentences, index=s.index) | pandas.Series |
"""Utilities for training and evaluating RL models on OpenAI gym environments"""
import warnings
from itertools import product
from collections import defaultdict
import numpy as np
from numpy_ml.utils.testing import DependencyWarning
from numpy_ml.rl_models.tiles.tiles3 import tiles, IHT
NO_PD = False
try:
import pandas as pd
except ModuleNotFoundError:
NO_PD = True
try:
import gym
except ModuleNotFoundError:
fstr = (
"Agents in `numpy_ml.rl_models` use the OpenAI gym for training. "
"To install the gym environments, run `pip install gym`. For more"
" information, see https://github.com/openai/gym."
)
warnings.warn(fstr, DependencyWarning)
class EnvModel(object):
"""
A simple tabular environment model that maintains the counts of each
reward-outcome pair given the state and action that preceded them. The
model can be queried with
>>> M = EnvModel()
>>> M[(state, action, reward, next_state)] += 1
>>> M[(state, action, reward, next_state)]
1
>>> M.state_action_pairs()
[(state, action)]
>>> M.outcome_probs(state, action)
[(next_state, 1)]
"""
def __init__(self):
super(EnvModel, self).__init__()
self._model = defaultdict(lambda: defaultdict(lambda: 0))
def __setitem__(self, key, value):
"""Set self[key] to value"""
s, a, r, s_ = key
self._model[(s, a)][(r, s_)] = value
def __getitem__(self, key):
"""Return the value associated with key"""
s, a, r, s_ = key
return self._model[(s, a)][(r, s_)]
def __contains__(self, key):
"""True if EnvModel contains `key`, else False"""
s, a, r, s_ = key
p1 = (s, a) in self.state_action_pairs()
p2 = (r, s_) in self.reward_outcome_pairs()
return p1 and p2
def state_action_pairs(self):
"""Return all (state, action) pairs in the environment model"""
return list(self._model.keys())
def reward_outcome_pairs(self, s, a):
"""
Return all (reward, next_state) pairs associated with taking action `a`
in state `s`.
"""
return list(self._model[(s, a)].keys())
def outcome_probs(self, s, a):
"""
Return the probability under the environment model of each outcome
state after taking action `a` in state `s`.
Parameters
----------
s : int as returned by ``self._obs2num``
The id for the state/observation.
a : int as returned by ``self._action2num``
The id for the action taken from state `s`.
Returns
-------
outcome_probs : list of (state, prob) tuples
A list of each possible outcome and its associated probability
under the model.
"""
items = list(self._model[(s, a)].items())
total_count = np.sum([c for (_, c) in items])
outcome_probs = [c / total_count for (_, c) in items]
outcomes = [p for (p, _) in items]
return list(zip(outcomes, outcome_probs))
def state_action_pairs_leading_to_outcome(self, outcome):
"""
Return all (state, action) pairs that have a nonzero probability of
producing `outcome` under the current model.
Parameters
----------
outcome : int
The outcome state.
Returns
-------
pairs : list of (state, action) tuples
A list of all (state, action) pairs with a nonzero probability of
producing `outcome` under the model.
"""
pairs = []
for sa in self.state_action_pairs():
outcomes = [o for (r, o) in self.reward_outcome_pairs(*sa)]
if outcome in outcomes:
pairs.append(sa)
return pairs
def tile_state_space(
env,
env_stats,
n_tilings,
obs_max=None,
obs_min=None,
state_action=False,
grid_size=(4, 4),
):
"""
Return a function to encode the continous observations generated by `env`
in terms of a collection of `n_tilings` overlapping tilings (each with
dimension `grid_size`) of the state space.
Arguments
---------
env : ``gym.wrappers.time_limit.TimeLimit`` instance
An openAI environment.
n_tilings : int
The number of overlapping tilings to use. Should be a power of 2. This
determines the dimension of the discretized tile-encoded state vector.
obs_max : float or np.ndarray
The value to treat as the max value of the observation space when
calculating the grid widths. If None, use
``env.observation_space.high``. Default is None.
obs_min : float or np.ndarray
The value to treat as the min value of the observation space when
calculating the grid widths. If None, use
``env.observation_space.low``. Default is None.
state_action : bool
Whether to use tile coding to encode state-action values (True) or just
state values (False). Default is False.
grid_size : list of length 2
A list of ints representing the coarseness of the tilings. E.g., a
`grid_size` of [4, 4] would mean each tiling consisted of a 4x4 tile
grid. Default is [4, 4].
Returns
-------
encode_obs_as_tile : function
A function which takes as input continous observation vector and
returns a set of the indices of the active tiles in the tile coded
observation space.
n_states : int
An integer reflecting the total number of unique states possible under
this tile coding regimen.
"""
obs_max = np.nan_to_num(env.observation_space.high) if obs_max is None else obs_max
obs_min = np.nan_to_num(env.observation_space.low) if obs_min is None else obs_min
if state_action:
if env_stats["tuple_action"]:
n = [space.n - 1.0 for space in env.action_spaces.spaces]
else:
n = [env.action_space.n]
obs_max = np.concatenate([obs_max, n])
obs_min = np.concatenate([obs_min, np.zeros_like(n)])
obs_range = obs_max - obs_min
scale = 1.0 / obs_range
# scale (state-)observation vector
scale_obs = lambda obs: obs * scale # noqa: E731
n_tiles = np.prod(grid_size) * n_tilings
n_states = np.prod([n_tiles - i for i in range(n_tilings)])
iht = IHT(16384)
def encode_obs_as_tile(obs):
obs = scale_obs(obs)
return tuple(tiles(iht, n_tilings, obs))
return encode_obs_as_tile, n_states
def get_gym_environs():
"""List all valid OpenAI ``gym`` environment ids"""
return [e.id for e in gym.envs.registry.all()]
def get_gym_stats():
"""Return a pandas DataFrame of the environment IDs."""
df = []
for e in gym.envs.registry.all():
print(e.id)
df.append(env_stats(gym.make(e.id)))
cols = [
"id",
"continuous_actions",
"continuous_observations",
"action_dim",
# "action_ids",
"deterministic",
"multidim_actions",
"multidim_observations",
"n_actions_per_dim",
"n_obs_per_dim",
"obs_dim",
# "obs_ids",
"seed",
"tuple_actions",
"tuple_observations",
]
return df if NO_PD else | pd.DataFrame(df) | pandas.DataFrame |
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..screenip_exe import Screenip
test = {}
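
# Each test below builds a Screenip object from empty DataFrames (see
# create_screenip_object), assigns only the inputs it needs as pandas Series,
# and compares the computed output against expected values with
# numpy.testing.assert_allclose at a relative tolerance of 1e-4.  The suite can
# be run with the standard unittest runner (python -m unittest); the exact
# module path to pass depends on how the package is laid out.
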
class TestScreenip(unittest.TestCase):
"""
Unit tests for screenip.
"""
print("screenip unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for screenip unittest.
:return:
"""
pass
# screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)
# setup the test as needed
# e.g. pandas to open screenip qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for screenip unittest.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_screenip_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty screenip object
screenip_empty = Screenip(df_empty, df_empty)
return screenip_empty
def test_screenip_unit_fw_bird(self):
"""
unittest for function screenip.fw_bird:
:return:
"""
expected_results = pd.Series([0.0162, 0.0162, 0.0162], dtype='float')
result = pd.Series([], dtype='float')
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
try:
# for i in range(0,3):
# result[i] = screenip_empty.fw_bird()
screenip_empty.no_of_runs = len(expected_results)
screenip_empty.fw_bird()
result = screenip_empty.out_fw_bird
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fw_mamm(self):
"""
unittest for function screenip.fw_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.172, 0.172, 0.172], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.no_of_runs = len(expected_results)
result = screenip_empty.fw_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_bird(self):
"""
unittest for function screenip.dose_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000000., 4805.50175, 849727.21122], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_bird * self.solubility)/(self.bodyweight_assessed_bird / 1000.)
screenip_empty.out_fw_bird = pd.Series([10., 0.329, 1.8349], dtype='float')
screenip_empty.solubility = pd.Series([100., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([1.0, 2.395, 0.98], dtype='float')
result = screenip_empty.dose_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_mamm(self):
"""
unittest for function screenip.dose_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([8000000., 48205.7595, 3808036.37889], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_mamm * self.solubility)/(self.bodyweight_assessed_mammal / 1000)
screenip_empty.out_fw_mamm = pd.Series([20., 12.843, 6.998], dtype='float')
screenip_empty.solubility = pd.Series([400., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([1., 9.32, 0.834], dtype='float')
result = screenip_empty.dose_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_bird(self):
"""
unittest for function screenip.at_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000., 687.9231, 109.3361], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_avian_water) * ((self.bodyweight_assessed_bird / self.bodyweight_tested_bird)**(self.mineau_scaling_factor - 1.))
screenip_empty.ld50_avian_water = pd.Series([2000., 938.34, 345.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([100., 39.49, 183.54], dtype='float')
screenip_empty.ld50_bodyweight_tested_bird = pd.Series([200., 73.473, 395.485], dtype='float')
screenip_empty.mineau_scaling_factor = pd.Series([2., 1.5, 2.5], dtype='float')
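            # Worked check for the first case: 2000 * (100/200)**(2 - 1) = 1000,
            # which matches expected_results[0] above.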
result = screenip_empty.at_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_mamm(self):
"""
unittest for function screenip.at_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([11.89207, 214.0572, 412.6864], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.ld50_mammal_water = pd.Series([10., 250., 500.], dtype='float')
screenip_empty.ld50_bodyweight_tested_mammal = pd.Series([200., 39.49, 183.54], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([100., 73.473, 395.485], dtype='float')
result = screenip_empty.at_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fi_bird(self):
"""
unittest for function screenip.fi_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.012999, 0.026578, 0.020412], dtype='float')
result = pd.Series([], dtype='float')
try:
#0.0582 * ((bw_grams / 1000.)**0.651)
bw_grams = pd.Series([100., 300., 200.], dtype='float')
result = screenip_empty.fi_bird(bw_grams)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_act(self):
"""
unittest for function screenip.test_act:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([10.5737, 124.8032, 416.4873], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.noael_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.noael_mammal_water = pd.Series([10., 120., 400.], dtype='float')
screenip_empty.noael_bodyweight_tested_mammal = pd.Series([500., 385.45, 673.854], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([400., 329.45, 573.322], dtype='float')
result = screenip_empty.act()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_det(self):
"""
        unittest for function screenip.det:
        :return:
"""
#
# '''
# Dose Equiv. Toxicity:
#
# The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by
# the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):
#
# Dose Equiv. Toxicity = (NOAEC * FI) / BW
#
# NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,
# and for any other test species. The model calculates the dose equivalent toxicity values for
# all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose
# equivalent toxicity value to represent the chronic toxicity of the chemical to birds.
# '''
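        # Worked illustration with the quail inputs used further below
        # (NOAEC = 100 mg/kg-diet, BW = 178 g):
        #   FI  = 0.0582 * (178/1000)**0.651 ~= 0.0189 kg-diet/day
        #   DET = (NOAEC * FI) / (BW/1000) = (100 * 0.0189) / 0.178 ~= 10.6 mg/kg-bw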
# try:
# # result =
# # self.assertEquals(result, )
# pass
# finally:
# pass
# return
#
#
# def test_det_duck(self):
# """
# unittest for function screenip.det_duck:
# :return:
# """
# try:
# # det_duck = (self.noaec_duck * self.fi_bird(1580.)) / (1580. / 1000.)
# screenip_empty.noaec_duck = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_duck()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_quail(self):
# """
# unittest for function screenip.det_quail:
# :return:
# """
# try:
# # det_quail = (self.noaec_quail * self.fi_bird(178.)) / (178. / 1000.)
# screenip_empty.noaec_quail = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_quail()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_other_1(self):
# """
# unittest for function screenip.det_other_1:
# :return:
# """
# try:
# #det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# #det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# screenip_empty.noaec_bird_other_1 = pd.Series([400.]) # mg/kg-diet
# screenip_empty.bodyweight_bird_other_1 = pd.Series([100]) # grams
# result = screenip_empty.det_other_1()
# npt.assert_array_almost_equal(result, 4666, 4)
# finally:
# pass
# return
#
# The following tests are configured such that:
# 1. four values are provided for each needed input
# 2. the four input values generate four values of out_det_* per bird type
# 3. the inputs per bird type are set so that calculations of out_det_* will result in
# each bird type having one minimum among the bird types;
# thus all four calculations result in one minimum per bird type
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([4.2174, 4.96125, 7.97237, 10.664648], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.bodyweight_bobwhite_quail = 178.
screenip_empty.bodyweight_mallard_duck = 1580.
screenip_empty.noaec_quail = pd.Series([100., 300., 75., 150.], dtype='float')
screenip_empty.noaec_duck = pd.Series([400., 100., 200., 350.], dtype='float')
screenip_empty.noaec_bird_other_1 = | pd.Series([50., 200., 300., 250.], dtype='float') | pandas.Series |
from flask import Flask,redirect, url_for, request
import pandas as pd
import numpy as np
from pandas import to_datetime,read_csv
from datetime import timedelta
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from arch import arch_model
#import matplotlib.pyplot as plt
import gc
def feature_selecion():
start_date = request.form["year"] + "-" + request.form["from_month"] + "-01"
if(int(request.form["to_month"])==12):
end_date = str(int(request.form["year"])+1) + "-" + str(1) + "-01"
else:
end_date = request.form["year"] + "-" + str(int(request.form["to_month"])+1) + "-01"
data_file ="static/data/"+request.form["currency_pair"]+"/DAT_MT_"+request.form["currency_pair"]+"_M1_"+request.form["year"] +".csv"
#news = ["Brexit","US presidential election 2012"]
#price
data = read_csv(data_file)
data['Time'] = data[['Date', 'Time']].apply(lambda x: ' '.join(x), axis=1)
data['Time'] = data['Time'].apply(lambda x: to_datetime(x)-timedelta(hours=2))
data.index = data.Time
mask = (data.index > start_date) & (data.index <= end_date)
data = data.loc[mask]
series = data["Close"]
""""#price and the gradient
fig = plt.figure()
#fig.tight_layout()
ax3 = fig.add_subplot(211)
ax3.plot(series)
ax3.set_title(request.form["currency_pair"] + ' prices')
ax3.set_xlabel('Time')
ax3.set_ylabel('Price')"""
np_array_series = np.array(data['Close'])
np_array_dates = np.array(data.index)
gradients = np.gradient(np_array_series)
"""ax1 = fig.add_subplot(212)
ax1.set_title('Gradients of the price series')
ax1.set_xlabel('Time')
ax1.set_ylabel('Gradient')
ax1.plot(np_array_dates, gradients)
fig.savefig("static/anomalies/feature_lection_image1.png")
fig,ax1,ax2 = "","","" """
price_list = series.values
ADF_result_price = adfuller(price_list)
print('ADF Statistic: for series %f' % ADF_result_price[0])
print('p-value: %f' % ADF_result_price[1]) #p-value: 0.668171
print('Critical Values:')
for key, value in ADF_result_price[4].items():
print('\t%s: %.3f' % (key, value))
#create log return series
series_log_ret = np.log(data.Close) - np.log(data.Close.shift(1))
series_log_ret = series_log_ret.dropna()
log_return_list = series_log_ret.values
ADF_result_log_return = adfuller(log_return_list)
print('ADF Statistic: for series_log_ret %f' % ADF_result_log_return[0])
print('p-value: %f' % ADF_result_log_return[1]) #p-value: 0.000000 therefore, null hypothesis is rejected. the system is stationary
print('Critical Values:')
for key, value in ADF_result_log_return[4].items():
print('\t%s: %.3f' % (key, value))
    # Test for stationarity: the ADF null hypothesis (a unit root, i.e. non-stationarity)
    # is rejected when the p-value is below 0.05, meaning the raw price series is stationary.
    if ADF_result_price[1] < 0.05:
        input_series = price_list
    else:
        input_series = log_return_list
#Creating the ARIMA model
arima_model = ARIMA(series_log_ret, order=(4,1,1))
model_fit = arima_model.fit(disp=0)
print(model_fit.summary())
#tsaplots.plot_acf(series_log_ret, lags=30)
#tsaplots.plot_pacf(series_log_ret, lags=30)
#Getting the residual series
residuals = | pd.DataFrame(model_fit.resid) | pandas.DataFrame |
import sys
import pandas as pd
from sqlalchemy import create_engine
import re
import nltk
nltk.download(['punkt', 'wordnet','stopwords','averaged_perceptron_tagger'])
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import make_scorer, accuracy_score, f1_score, fbeta_score, classification_report
from sklearn.base import BaseEstimator, TransformerMixin
import pickle
def load_data(database_filepath):
"""
    Load the DataFrame produced by the ETL pipeline from the SQLite database.
    args: database_filepath - path to the SQLite database written by the ETL
    return: X (messages), y (category targets), and the category column names
"""
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql_table('df',engine)
X = df.message
y = df[df.columns[4:]]
category_names = y.columns
return X,y, category_names
def tokenize(text):
"""
    Tokenize and lemmatize the text, replacing any URLs with a placeholder.
    Args: text - the raw message string
    return: a list of cleaned, lower-cased tokens
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
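# Illustrative sketch (added for clarity; not necessarily this project's actual
# build_model): shows how `tokenize` and the StartingVerbExtractor defined just
# below are typically combined in a FeatureUnion ahead of a multi-output classifier.
def _example_pipeline():
    # StartingVerbExtractor is defined later in this module; the name is only
    # resolved when this function is called, so the forward reference is fine.
    return Pipeline([
        ('features', FeatureUnion([
            ('text_pipeline', Pipeline([
                ('vect', CountVectorizer(tokenizer=tokenize)),
                ('tfidf', TfidfTransformer()),
            ])),
            ('starting_verb', StartingVerbExtractor()),
        ])),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ])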
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
    Custom transformer that flags whether a message starts with a verb (or the retweet marker 'RT').
"""
def starting_verb(self, text):
"""
        Determine whether any sentence in the text starts with a verb (or 'RT').
        Args: text - the raw message string
        return: True if such a sentence is found, else False
"""
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
if len(pos_tags)==0:
return False
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, X, y=None):
"""
        No-op fit; required by the scikit-learn transformer interface.
"""
return self
def transform(self, X):
"""
method that applies the transformation
"""
X_tagged = pd.Series(X).apply(self.starting_verb)
return | pd.DataFrame(X_tagged) | pandas.DataFrame |
#!/usr/bin/env python3
import os, time, json
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.colors import LogNorm
from scipy.integrate import quad
import tinydb as db
import argparse
from statsmodels.stats import proportion
from pygama import DataSet
from pygama.analysis.calibration import *
from pygama.analysis.histograms import *
import pygama.utils as pgu
from matplotlib.lines import Line2D
from pygama.utils import set_plot_style
set_plot_style("clint")
def main():
"""
Code to implement an A/E cut
"""
# global runDB
# with open("runDB.json") as f:
# runDB = json.load(f)
# global tier_dir
# tier_dir = runDB["tier_dir"]
# global meta_dir
# meta_dir = runDB["meta_dir"]
run_db, cal_db = "../experiments/mj60/runDB.json", "../experiments/mj60/calDB.json"
par = argparse.ArgumentParser(description="A/E cut for MJ60")
arg, st, sf = par.add_argument, "store_true", "store_false"
arg("-ds", nargs='*', action="store", help="load runs for a DS")
arg("-r", "--run", nargs=1, help="load a single run")
arg("-db", "--writeDB", action=st, help="store results in DB")
arg("-mode", "--mode", nargs=1, action="store",
help="232 or 228 for the two diffrerent thorium types")
args = vars(par.parse_args())
# -- declare the DataSet --
if args["ds"]:
ds_lo = int(args["ds"][0])
try:
ds_hi = int(args["ds"][1])
except:
ds_hi = None
ds = DataSet(ds_lo, ds_hi,
md=run_db, cal = cal_db) #,tier_dir=tier_dir)
if args["run"]:
ds = DataSet(run=int(args["run"][0]),
md=run_db, cal=cal_db)
find_cut(ds, ds_lo, args["mode"][0], args["writeDB"])
def find_cut(ds, ds_lo, mode, write_db=False):
"""
    Find and record (if -db is chosen) an A/E cut for either a Th232 or Th228 source.
    Currently a brute-force algorithm is used; an optimized algorithm is a work in
    progress.
"""
#Make tier2 dataframe
t2 = ds.get_t2df()
t2 = t2.reset_index(drop=True)
#Get e_ftp and pass1 calibration constant TODO: need pass2 constants at some point
calDB = ds.calDB
query = db.Query()
table = calDB.table("cal_pass1")
vals = table.all()
df_cal = | pd.DataFrame(vals) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.stats import mode
from tqdm import tqdm
from geopy.geocoders import Nominatim
from datetime import datetime
def handle_bornIn(x):
skip_vals = ['16-Mar', '23-May', 'None']
if x not in skip_vals:
return datetime(2012, 1, 1).year - datetime(int(x), 1, 1).year
else:
return 23
def handle_gender(x):
if x == 'male':
return 1
else:
return 0
def handle_memberSince(x):
skip_vals = ['--None']
if pd.isna(x):
return datetime(2012, 1, 1)
elif x not in skip_vals:
return datetime.strptime(x, '%d-%m-%Y')
else:
return datetime(2012, 1, 1)
def process_tours_df(data_content):
dtype = {}
cols = data_content.tours_df.columns[9:]
for d in cols:
dtype[d] = np.int16
data_content.tours_df = data_content.tours_df.astype(dtype)
data_content.tours_df['area'] = data_content.tours_df['city'] + ' ' + data_content.tours_df['state'] + ' ' + \
data_content.tours_df['pincode'] + ' ' + data_content.tours_df['country']
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.lstrip() if type(x) == str else x)
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.rstrip() if type(x) == str else x)
data_content.tours_df.drop(['city', 'state', 'pincode', 'country'], axis=1, inplace=True)
data_content.tours_df['tour_date'] = data_content.tours_df['tour_date'].apply(
lambda x: datetime(int(x.split('-')[2]), int(x.split('-')[1]), int(x.split('-')[0]), 23, 59))
def process_tour_convoy_df(data_content):
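    """Summarise attendance for every tour (totals plus going/not_going/maybe
    fractions and a standardised invite count), then invert the table into a
    per-biker DataFrame listing the tours for each response type."""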
print('Initializing tour_convoy_df...', flush=True)
data_content.tour_convoy_df['total_going'] = 0
data_content.tour_convoy_df['total_not_going'] = 0
data_content.tour_convoy_df['total_maybe'] = 0
data_content.tour_convoy_df['total_invited'] = 0
data_content.tour_convoy_df['fraction_going'] = 0
data_content.tour_convoy_df['fraction_not_going'] = 0
data_content.tour_convoy_df['fraction_maybe'] = 0
known_bikers = set()
lis = ['going', 'not_going', 'maybe', 'invited']
pbar = tqdm(total=data_content.tour_convoy_df.shape[0],
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 1 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
s = [0, 0, 0]
for j, l in enumerate(lis):
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
data_content.tour_convoy_df.loc[idx, 'total_' + l] = len(biker)
if j != 3:
s[j] = len(biker)
for bik in biker:
known_bikers.add(bik)
if sum(s) != 0:
for j in range(3):
data_content.tour_convoy_df.loc[idx, 'fraction_' + lis[j]] = s[j] / sum(s)
pbar.update(1)
pbar.close()
mean = data_content.tour_convoy_df['total_invited'].mean()
std = data_content.tour_convoy_df['total_invited'].std()
data_content.tour_convoy_df['fraction_invited'] = data_content.tour_convoy_df['total_invited'].apply(
lambda x: (x - mean) / std)
biker_tour_convoy_df = dict()
for biker in list(known_bikers):
biker_tour_convoy_df[biker] = [[], [], [], []]
pbar = tqdm(total=data_content.tour_convoy_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 2 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
for l in lis:
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
for bik in biker:
biker_tour_convoy_df[bik][lis.index(l)] += \
[data_content.tour_convoy_df.loc[idx, 'tour_id']]
pbar.update(1)
pbar.close()
for key, _ in biker_tour_convoy_df.items():
for i in range(4):
biker_tour_convoy_df[key][i] = ' '.join(list(set(biker_tour_convoy_df[key][i])))
biker_tour_convoy_df = pd.DataFrame.from_dict(biker_tour_convoy_df, orient='index')
biker_tour_convoy_df.reset_index(inplace=True)
biker_tour_convoy_df.columns = ['biker_id'] + lis
print('tour_convoy_df ready...', flush=True)
return biker_tour_convoy_df
def get_coordinates(locations, data_content):
geolocation_map = {}
locator = Nominatim(user_agent="Kolibri")
for i in tqdm(range(len(locations)),
disable=False,
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
# noinspection PyBroadException
try:
location = locator.geocode(locations[i])
geolocation_map[locations[i]] = [location.latitude, location.longitude]
except:
# Called when there is presumably some noise in the Address location
# noinspection PyBroadException
data_content.noise += [locations[i]]
geolocation_map[locations[i]] = [np.nan, np.nan]
location_df = pd.DataFrame({'location': list(locations),
'latitude': np.array(list(geolocation_map.values()))[:, 0],
'longitude': np.array(list(geolocation_map.values()))[:, 1]})
return geolocation_map, location_df
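# Minimal usage sketch (added for illustration): `get_coordinates` only needs an
# object exposing a `noise` list to collect unresolvable addresses, so a simple
# namespace stands in for the project's real data_content object here.
def _geocoding_example():
    from types import SimpleNamespace
    dc = SimpleNamespace(noise=[])
    geomap, location_df = get_coordinates(['San Francisco CA 94103 USA'], dc)
    return geomap, location_df, dc.noise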
def initialize_locations(data_content):
# noinspection PyBroadException
try:
location_df = pd.read_csv(data_content.base_dir + 'temp/location.csv')
location_from_csv = True
except:
location_df = None
location_from_csv = False
if location_from_csv:
geolocation = {}
print('Initializing Locations from DataFrame...', flush=True)
for i, l in enumerate(location_df['location'].tolist()):
geolocation[l] = [location_df.loc[i, 'latitude'], location_df.loc[i, 'longitude']]
else:
print('Initializing Locations from Nominatim...', flush=True)
biker_location = data_content.bikers_df['area'].dropna().drop_duplicates().tolist()
geolocation, location_df = get_coordinates(biker_location, data_content)
return geolocation, location_df
def impute_location_from_tour_convoy(data_content):
# From tour_convoy
unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
org_bik = list(set(data_content.convoy_df['biker_id'].drop_duplicates().tolist()).intersection(
data_content.bikers_df['biker_id'].tolist()))
groups = ['going', 'not_going', 'maybe', 'invited']
rest_trs = data_content.tours_df[data_content.tours_df['tour_id'].isin(
data_content.tour_convoy_df['tour_id'])]
rest_con = data_content.convoy_df[data_content.convoy_df['biker_id'].isin(org_bik)]
pbar = tqdm(total=unk_loc.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, _ in unk_loc.iterrows():
if unk_loc.loc[idx, 'biker_id'] in org_bik:
cdf = rest_con[rest_con['biker_id'] == unk_loc.loc[idx, 'biker_id']]
if cdf.shape[0] > 0:
tours = []
for g in groups:
tours += cdf[g].tolist()[0].split()
tours = (' '.join(tours)).split()
trs = rest_trs[rest_trs['tour_id'].isin(tours)]
if trs.shape[0] > 0:
m, _ = mode(trs[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = trs[trs['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = trs.loc[index, 'latitude'], trs.loc[index, 'longitude']
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def impute_location_from_tours(data_content):
# From tours_df
unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
org_bik = list(set(data_content.tours_df['biker_id'].drop_duplicates().tolist()).intersection(
data_content.bikers_df['biker_id'].tolist()))
pbar = tqdm(total=unk_loc.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, _ in unk_loc.iterrows():
if unk_loc.loc[idx, 'biker_id'] in org_bik:
tours = data_content.tours_df[data_content.tours_df['biker_id'] == unk_loc.loc[idx, 'biker_id']]
if tours.shape[0] > 0:
m, _ = mode(tours[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = tours[tours['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = tours.loc[index, 'latitude'], tours.loc[index, 'longitude']
if not np.isnan(lat):
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def impute_location_from_friends(data_content):
biker_df = pd.merge(data_content.bikers_df,
data_content.bikers_network_df, on='biker_id', how='left').copy()
bikers_df_ids = set(data_content.bikers_df['biker_id'].tolist())
# From friends
for i in range(data_content.location_recursion):
pbar = tqdm(total=biker_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, rows in biker_df.iterrows():
if not pd.isna(biker_df.loc[idx, 'friends']):
bikers_known_friends = set(biker_df.loc[idx, 'friends'].split()).intersection(bikers_df_ids)
if len(bikers_known_friends) >= data_content.member_threshold:
temp_df = biker_df[biker_df['biker_id'].isin(bikers_known_friends)].dropna()
if temp_df.shape[0] > 0:
m, _ = mode(temp_df[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = temp_df[temp_df['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = temp_df.loc[index, 'latitude'], temp_df.loc[index, 'longitude']
if pd.isna(data_content.bikers_df.loc[idx, 'latitude']):
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
elif not np.isnan(lat):
dist = (data_content.bikers_df.loc[idx, 'latitude'] - lat) ** 2 + \
(data_content.bikers_df.loc[idx, 'longitude'] - long) ** 2
if (dist ** 0.5) > data_content.gps_threshold:
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def fill_missing_locations(data_content):
    impute_location_from_friends(data_content)
impute_location_from_tours(data_content)
impute_location_from_tour_convoy(data_content)
def handle_locations(data_content):
print('Preprocessing bikers_df..', flush=True)
print('Initializing Locations...', flush=True)
geolocation, location_df = initialize_locations(data_content)
loc = set(location_df['location'].tolist())
for i in tqdm(range(data_content.bikers_df.shape[0]),
disable=False, desc='Step 1 of ' + str(data_content.total_steps),
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
if data_content.bikers_df.loc[i, 'area'] in loc:
data_content.bikers_df.loc[i, 'latitude'] = geolocation[data_content.bikers_df.loc[i, 'area']][0]
data_content.bikers_df.loc[i, 'longitude'] = geolocation[data_content.bikers_df.loc[i, 'area']][1]
data_content.current_step += 1
# Imputing Missing Locations
fill_missing_locations(data_content)
print('Locations Initialized...', flush=True)
print('bikers_df ready', flush=True)
def time_zone_converter(data_content):
for idx, _ in data_content.bikers_df.iterrows():
if not np.isnan(data_content.bikers_df.loc[idx, 'longitude']):
x = data_content.bikers_df.loc[idx, 'longitude']
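            # Nominal UTC offset in minutes derived from longitude: each 15-degree
            # band maps to one hour; the 7.5-degree shift centres bands on multiples of 15.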
data_content.bikers_df.loc[idx, 'time_zone'] = (np.floor((x - 7.500000001) / 15) + 1) * 60
def time_zone_for_location_imputation(data_content):
timezones = np.unique(data_content.bikers_df['time_zone'].drop_duplicates().dropna())
tz = dict()
for time in timezones:
df = data_content.bikers_df[data_content.bikers_df['time_zone'] == time]
m, _ = mode(df[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = df[df['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = df.loc[index, 'latitude'], df.loc[index, 'longitude']
tz[time] = [lat, long]
data_content.bikers_df['time_zone'] = data_content.bikers_df['time_zone'].map(
lambda x: x if x in timezones else np.nan)
df = data_content.bikers_df[( | pd.isna(data_content.bikers_df['latitude']) | pandas.isna |
import pandas as pd
from model.GenData import GenData
from service.CorrelationService import CorrelationService
class AfterCorData:
def __init__(self):
self.genData = GenData()
self.linearFeatureData = pd.DataFrame()
self.factorFeatureData = pd.DataFrame()
self.levelFeatureData = pd.DataFrame()
self.modelTargetData = pd.Series()
self.corBetweenData = []
self.newData = | pd.DataFrame() | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/01_datasets.ipynb (unless otherwise specified).
__all__ = ['CocoData']
# Cell
import contextlib
import io
from urllib.request import urlopen
from random import shuffle
from zipfile import ZipFile
from fastai.data.external import URLs
from fastai.imports import Path
from fastai.vision.all import PILImage, PILMask, TensorImage, TensorMask, LabeledBBox, TensorBBox, image2tensor
from .core import TensorBinMasks, show_binmask
from fastprogress.fastprogress import master_bar, progress_bar
from PIL import Image
import pandas as pd
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
import numpy as np
from shutil import rmtree
from matplotlib import cm
from matplotlib.colors import ListedColormap, Colormap
# Cell
class CocoData():
    '''Creates a dataset for object detection models by downloading images from the COCO dataset.
    Specify the name of the dataset and which categories it should contain.
    If data_path is None, it creates a new folder in fastai's data path, like `untar_data`.
    By default only bounding boxes are included; masks and crowded objects are optional.
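    Example (hypothetical dataset name; "cat" and "dog" are valid COCO categories)::
        path, df = CocoData.create("coco-cat-dog", ["cat", "dog"], max_images=500)
        path, df = CocoData.get_path_df("coco-cat-dog")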
'''
coco = None
@classmethod
def create(cls, ds_name, cat_list, data_path=None, with_mask=False, max_images=1000, remove_crowded=True):
"""Creates a new coco dataset with categories defined in cat_list, optionally with or without masks.
You can specify the path, where the dataset gets stored, by default it uses fastai's data path like `untar_data`"""
path = Path(URLs.path(c_key='data'))/ds_name if data_path is None else Path(data_path)/ds_name
path_images = path/"images"
path_masks = path/"masks"
if Path(path).is_dir():
print(f"Dataset {ds_name} already exists: {path}")
return cls.get_path_df(ds_name, data_path=data_path)
# create folders
print("Creating folders.")
path.mkdir(exist_ok=False, parents=True)
path_images.mkdir()
if with_mask: path_masks.mkdir()
# download annotation files
annotations = 'annotations/instances_train2017.json'
if not (path/annotations).is_file():
cls._download_annotation_file(path)
if not (path/annotations).is_file():
print("Download was not successful. No annotation file found.")
return
cls.coco = COCO(annotation_file=str(path/annotations))
# download images
cls._download_images(cat_list, path_images, max_images, remove_crowded)
# create dataframe
df = cls._create_dataframe(path, cat_list, with_mask)
return path, df
def get_path_df(ds_name, data_path=None):
"""Get path and dataframe of downloaded coco dataset."""
path = Path(URLs.path(c_key='data'))/ds_name if data_path is None else Path(data_path)/ds_name
if path.is_dir():
if (path/"df_train.csv").is_file():
return (path, pd.read_csv(path/"df_train.csv"))
else:
print(f"No Dataframe found in {path}")
else:
print(f"No dataset '{path}' found.")
print("Create dataset first with CocoData.create(ds_name, cat_list) or list available datasets with CocoData.ls()")
def ls(data_path=None):
"""List all available datasets."""
path = Path(URLs.path(c_key='data')) if data_path is None else Path(data_path)
if path.is_dir():
return list(path.ls())
else: print(f"Path {path} does not exist.")
def remove(ds_name, data_path=None):
"""Remove a downloaded coco dataset."""
path = Path(URLs.path(c_key='data'))/ds_name if data_path is None else Path(data_path)/ds_name
if path.is_dir():
rmtree(path)
print(f"{path} removed.")
else:
print(f"No dataset '{path}' found.")
def show_examples(ds_name, data_path=None, n=3):
"""Show examples of a downloaded coco dataset."""
_, df = CocoData.get_path_df(ds_name, data_path=data_path)
img_ids = [i for i in df.image_id.unique()]
shuffle(img_ids)
with_mask = "mask_path" in df.columns
for img_id in img_ids[:n]:
filt = df.image_id == img_id
img_path = df.loc[filt,"image_path"].values[0]
img = PILImage.create(img_path)
bboxes = [box for box in df.loc[filt,["x_min","y_min","x_max","y_max"]].values]
labels = [label[0] for label in df.loc[filt,["class_name"]].values]
if with_mask:
mask_paths = df.loc[filt,"mask_path"].values
masks = np.stack([PILMask.create(m) for m in mask_paths])
fig,ax = plt.subplots(figsize=(8,8))
TensorImage(image2tensor(img)).show(ax=ax)
if with_mask:
TensorBinMasks(masks).show(ax)
LabeledBBox(TensorBBox(bboxes), labels).show(ax)
def _download_annotation_file(path):
print("Downloading annotation files...")
url = 'http://images.cocodataset.org/annotations/annotations_trainval2017.zip'
zipresp = urlopen(url)
zip_fn = path/'annotations_trainval2017.zip'
with open(zip_fn, 'wb') as zip:
zip.write(zipresp.read())
zf = ZipFile(zip_fn)
zf.extractall(path=str(path))
zf.close()
Path(zip_fn).unlink()
def _download_images(cat_list, path_images, max_images, remove_crowded):
cat_ids = CocoData.coco.getCatIds(catNms=cat_list);
idx2cat = {e['id']:e['name'] for e in CocoData.coco.loadCats(CocoData.coco.getCatIds())}
img_id2fn = {}
print(f"Found {len(cat_ids)} valid categories.")
print([idx2cat[e] for e in cat_ids])
print("Starting download.")
mb = master_bar(range(len(cat_ids)))
for i in mb:
c_id = cat_ids[i]
print(f"Downloading images of category {idx2cat[c_id]}")
img_ids = CocoData.coco.getImgIds(catIds=c_id)
# small function to filter images with crowded objects
def _f(iid):
annos = CocoData.coco.loadAnns(CocoData.coco.getAnnIds(imgIds=iid))
annos = [a for a in annos if idx2cat[a["category_id"]] in cat_list]
is_crowd = [a["iscrowd"] for a in annos]
return 1 in is_crowd
if remove_crowded:
img_ids = [i for i in img_ids if not _f(i)]
if max_images is not None:
img_ids = img_ids[:max_images]
for i in img_ids:
img_id2fn[i] = path_images/(str(i).zfill(12)+".jpg")
for i in progress_bar(range(len(img_ids)), parent=mb):
with contextlib.redirect_stdout(io.StringIO()):
CocoData.coco.download(path_images, [img_ids[i]])
print(len([fn for fn in path_images.ls()]), "images downloaded.")
def _create_dataframe(path, cat_list, with_mask,):
print("Creating Dataframe...")
path_images = path/"images"
path_masks = path/"masks"
df_train = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import pandas.testing as pdt
import pytest
import pytz
from werkzeug.exceptions import RequestEntityTooLarge
from sfa_api.conftest import (
VALID_FORECAST_JSON, VALID_CDF_FORECAST_JSON, demo_forecasts)
from sfa_api.utils import request_handling
from sfa_api.utils.errors import (
BadAPIRequest, StorageAuthError, NotFoundException)
@pytest.mark.parametrize('start,end', [
('invalid', 'invalid'),
('NaT', 'NaT')
])
def test_validate_start_end_fail(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('start,end', [
('20190101T120000Z', '20190101T130000Z'),
('20190101T120000', '20190101T130000'),
('20190101T120000', '20190101T130000Z'),
('20190101T120000Z', '20190101T130000+00:00'),
('20190101T120000Z', '20190101T140000+01:00'),
])
def test_validate_start_end_success(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('query,exc', [
('?start=20200101T0000Z', {'end'}),
('?end=20200101T0000Z', {'start'}),
('?start=20200101T0000Z&end=20210102T0000Z', {'end'}),
('', {'start', 'end'}),
pytest.param('?start=20200101T0000Z&end=20200102T0000Z', {},
marks=pytest.mark.xfail(strict=True))
])
def test_validate_start_end_not_provided(app, forecast_id, query, exc):
url = f'/forecasts/single/{forecast_id}/values{query}'
with app.test_request_context(url):
with pytest.raises(BadAPIRequest) as err:
request_handling.validate_start_end()
if exc:
assert set(err.value.errors.keys()) == exc
@pytest.mark.parametrize('content_type,payload', [
('text/csv', ''),
('application/json', '{}'),
('application/json', '{"values": "nope"}'),
('text/plain', 'nope'),
])
def test_validate_parsable_fail(app, content_type, payload, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(
url, content_type=content_type, data=payload, method='POST',
content_length=len(payload)):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type', [
('text/csv'),
('application/json'),
('application/json'),
])
def test_validate_parsable_fail_too_large(app, content_type, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(RequestEntityTooLarge):
with app.test_request_context(
url, content_type=content_type, method='POST',
content_length=17*1024*1024):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type,payload', [
('text/csv', 'timestamp,value\n2019-01-01T12:00:00Z,5'),
('application/json', ('{"values":[{"timestamp": "2019-01-01T12:00:00Z",'
'"value": 5}]}')),
])
def test_validate_parsable_success(app, content_type, payload, forecast_id):
with app.test_request_context(f'/forecasts/single/{forecast_id}/values/',
content_type=content_type, data=payload,
method='POST'):
request_handling.validate_parsable_values()
def test_validate_observation_values():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
request_handling.validate_observation_values(df)
def test_validate_observation_values_bad_value():
df = pd.DataFrame({'value': [0.1, 's.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_no_value():
df = pd.DataFrame({'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_bad_timestamp():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
def test_validate_observation_values_no_timestamp():
df = pd.DataFrame({
'value': [0.1, '.2'], 'quality_flag': [0.0, 1]})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
@pytest.mark.parametrize('quality', [
[1, .1],
[1, '0.9'],
[2, 0],
['ham', 0]
])
def test_validate_observation_values_bad_quality(quality):
df = pd.DataFrame({'value': [0.1, .2],
'quality_flag': quality,
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
def test_validate_observation_values_no_quality():
df = pd.DataFrame({'value': [0.1, '.2'],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
expected_parsed_df = pd.DataFrame({
'a': [1, 2, 3, 4],
'b': [4, 5, 6, 7],
})
csv_string = "a,b\n1,4\n2,5\n3,6\n4,7\n"
json_string = '{"values":{"a":[1,2,3,4],"b":[4,5,6,7]}}'
def test_parse_csv_success():
test_df = request_handling.parse_csv(csv_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('csv_input', [
'',
"a,b\n1,4\n2.56,2.45\n1,2,3\n"
])
def test_parse_csv_failure(csv_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_csv(csv_input)
def test_parse_json_success():
test_df = request_handling.parse_json(json_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('json_input', [
'',
"{'a':[1,2,3]}"
])
def test_parse_json_failure(json_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_json(json_input)
null_df = pd.DataFrame({
'timestamp': [
'2018-10-29T12:00:00Z',
'2018-10-29T13:00:00Z',
'2018-10-29T14:00:00Z',
'2018-10-29T15:00:00Z',
],
'value': [32.93, 25.17, None, None],
'quality_flag': [0, 0, 1, 0]
})
def test_parse_csv_nan():
test_df = request_handling.parse_csv("""
# comment line
timestamp,value,quality_flag
2018-10-29T12:00:00Z,32.93,0
2018-10-29T13:00:00Z,25.17,0
2018-10-29T14:00:00Z,,1 # this value is NaN
2018-10-29T15:00:00Z,NaN,0
""")
pdt.assert_frame_equal(test_df, null_df)
def test_parse_json_nan():
test_df = request_handling.parse_json("""
{"values":[
{"timestamp": "2018-10-29T12:00:00Z", "value": 32.93, "quality_flag": 0},
{"timestamp": "2018-10-29T13:00:00Z", "value": 25.17, "quality_flag": 0},
{"timestamp": "2018-10-29T14:00:00Z", "value": null, "quality_flag": 1},
{"timestamp": "2018-10-29T15:00:00Z", "value": null, "quality_flag": 0}
]}
""")
pdt.assert_frame_equal(test_df, null_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'text/csv'),
(csv_string, 'application/vnd.ms-excel'),
(json_string, 'application/json')
])
def test_parse_values_success(app, data, mimetype):
with app.test_request_context():
test_df = request_handling.parse_values(data, mimetype)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'application/fail'),
(json_string, 'image/bmp'),
])
def test_parse_values_failure(data, mimetype):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_values(data, mimetype)
@pytest.mark.parametrize('dt_string,expected', [
('20190101T1200Z', pd.Timestamp('20190101T1200Z')),
('20190101T1200', pd.Timestamp('20190101T1200Z')),
('20190101T1200+0700', pd.Timestamp('20190101T0500Z'))
])
def test_parse_to_timestamp(dt_string, expected):
parsed_dt = request_handling.parse_to_timestamp(dt_string)
assert parsed_dt == expected
@pytest.mark.parametrize('dt_string', [
'invalid datetime',
'21454543251345234',
'20190101T2500Z',
'NaT',
])
def test_parse_to_timestamp_error(dt_string):
with pytest.raises(ValueError):
request_handling.parse_to_timestamp(dt_string)
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1150Z')),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0400Z']), 120, None),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0001Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z')),
# out of order
pytest.param(
pd.DatetimeIndex(['2019-09-01T0013Z', '2019-09-01T0006Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z'), marks=pytest.mark.xfail),
(pd.date_range(start='2019-03-10 00:00', end='2019-03-10 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.date_range(start='2019-11-03 00:00', end='2019-11-03 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.DatetimeIndex(['2019-01-01T000132Z']), 33, None),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2018-12-01T000132Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2019-01-02T000132Z'))
])
def test_validate_index_period(index, interval_length, previous_time):
request_handling.validate_index_period(index, interval_length,
previous_time)
def test_validate_index_empty():
with pytest.raises(request_handling.BadAPIRequest):
request_handling.validate_index_period(pd.DatetimeIndex([]), 10,
None)
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0300Z']), 60),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0300Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='20min'), 10),
])
def test_validate_index_period_missing(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'Missing' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0200Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0045Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='5min'), 10),
])
def test_validate_index_period_extra(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'extra' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0201Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0130Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1305Z',
freq='5min'), 10),
])
def test_validate_index_period_other(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) > 0
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1155Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0000Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-09-01T0000Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2018-12-01T000232Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2020-12-01T000232Z'))
])
def test_validate_index_period_previous(index, interval_length, previous_time):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
previous_time)
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'previous time' in errs[0]
@pytest.mark.parametrize('ep,res', [
('{"restrict_upload": true}', True),
('{"restrict_upload": true, "other_key": 1}', True),
('{"restrict_upload" : true}', True),
('{"restrict_upload" : True}', True),
('{"restrict_upload": 1}', False),
('{"restrict_upload": false}', False),
('{"restrict_uploa": true}', False),
('{"upload_restrict_upload": true}', False),
])
def test__restrict_in_extra(ep, res):
assert request_handling._restrict_in_extra(ep) is res
def test__current_utc_timestamp():
t = request_handling._current_utc_timestamp()
assert isinstance(t, pd.Timestamp)
assert t.tzinfo == pytz.utc
def test_restrict_upload_window_noop():
assert request_handling.restrict_forecast_upload_window(
'', None, None) is None
@pytest.mark.parametrize('now,first', [
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T12:00Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T00:00Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T12:01Z'), pd.Timestamp('2019-11-02T13:00Z')),
])
def test_restrict_upload_window(mocker, now, first):
fxd = VALID_FORECAST_JSON.copy()
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
request_handling.restrict_forecast_upload_window(ep, lambda: fxd, first)
@pytest.mark.parametrize('now,first', [
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T12:00Z'), pd.Timestamp('2019-11-01T13:00Z')),
# as currently and previously (pre-rc1) implemented in core, midnight is a
    # valid forecast init time even if it doesn't start until mid-day.
    # pre-rc1 did not include 11/1 00:00, but would include 11/2 00:00 in issue times
(pd.Timestamp('2019-11-01T00:00Z'), pd.Timestamp('2019-11-01T01:00Z')),
(pd.Timestamp('2019-11-01T22:01Z'), pd.Timestamp('2019-11-02T00:00Z')),
(pd.Timestamp('2019-11-01T23:20Z'), pd.Timestamp('2019-11-02T01:00Z'))
])
def test_restrict_upload_window_freq(mocker, now, first):
fxd = demo_forecasts['f8dd49fa-23e2-48a0-862b-ba0af6dec276'].copy()
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
request_handling.restrict_forecast_upload_window(ep, lambda: fxd, first)
def test_restrict_upload_window_cdf_dict(mocker):
now = pd.Timestamp('2019-11-01T11:59Z')
first = pd.Timestamp('2019-11-01T13:00Z')
fxd = VALID_CDF_FORECAST_JSON.copy()
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
request_handling.restrict_forecast_upload_window(ep, lambda: fxd, first)
def test_restrict_upload_window_cant_get(mocker):
now = pd.Timestamp('2019-11-01T11:59Z')
first = pd.Timestamp('2019-11-01T13:00Z')
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
get = mocker.MagicMock(side_effect=StorageAuthError)
with pytest.raises(NotFoundException):
request_handling.restrict_forecast_upload_window(ep, get, first)
@pytest.mark.parametrize('now,first', [
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T14:00Z')),
(pd.Timestamp('2019-11-01T12:00:00.000001Z'),
pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-02T13:00Z')),
])
def test_restrict_upload_window_bad(mocker, now, first):
fxd = VALID_FORECAST_JSON.copy()
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
with pytest.raises(BadAPIRequest) as err:
request_handling.restrict_forecast_upload_window(
ep, lambda: fxd, first)
assert 'only accepting' in err.value.errors['issue_time'][0]
@pytest.mark.parametrize('now,first,label', [
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z'),
'beginning'),
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z'),
'instant'),
(pd.Timestamp('2019-11-01T12:00Z'), pd.Timestamp('2019-11-01T13:05Z'),
'ending'),
pytest.param(
pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z'),
'ending', marks=pytest.mark.xfail
),
])
def test_restrict_upload_interval_label(mocker, now, first, label):
fxd = VALID_FORECAST_JSON.copy()
fxd['interval_label'] = label
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
request_handling.restrict_forecast_upload_window(ep, lambda: fxd, first)
@pytest.mark.parametrize('mimetype', [
'text/csv',
'application/vnd.ms-excel',
'application/json'
])
def test_parse_values_too_much_data(app, random_post_payload, mimetype):
with app.test_request_context():
data = random_post_payload(
app.config.get('MAX_POST_DATAPOINTS') + 1,
mimetype
)
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_values(data, mimetype)
@pytest.mark.parametrize('lat,lon,exc', [
('str', -110, {'latitude': ['Must be a float']}),
(32, 'str', {'longitude': ['Must be a float']}),
('str', 'str', {'longitude': ['Must be a float'],
'latitude': ['Must be a float']}),
(-110, -110, {'latitude': ['Must be within [-90, 90].']}),
(-110, -181, {'latitude': ['Must be within [-90, 90].'],
'longitude': ['Must be within (-180, 180].']}),
(None, -181, {'latitude': ['Must provide a latitude'],
'longitude': ['Must be within (-180, 180].']}),
(32, None, {'longitude': ['Must provide a longitude']}),
])
def test_validate_latitude_longitude_fail(app, lat, lon, exc):
url = '/climatezones/search?'
if lat is not None:
url += f'latitude={lat}&'
if lon is not None:
url += f'longitude={lon}'
with pytest.raises(request_handling.BadAPIRequest) as err:
with app.test_request_context(url):
request_handling.validate_latitude_longitude()
assert err.value.errors == exc
@pytest.mark.parametrize('lat,lon', [
(32, -110),
(0, 0),
(-20, 170),
(-.01, 20.8)
])
def test_validate_latitude_longitude_success(app, lat, lon):
url = f'/climatezones/search?latitude={lat}&longitude={lon}'
with app.test_request_context(url):
request_handling.validate_latitude_longitude()
@pytest.mark.parametrize('data', [
pd.DataFrame({'value': []}),
pd.DataFrame({'value': [0, 0, 0]}),
pd.DataFrame({'value': [0.0, 1.0, 0.0]}, index=pd.date_range(
start='now', freq='5min', periods=3)),
pd.DataFrame({'value': [1, 0, 1], 'other': ['a', 'b', 'c']})
])
def test_validate_event_data_ok(data):
request_handling.validate_event_data(data)
@pytest.mark.parametrize('data,exp', [
( | pd.DataFrame({'value': [1, 2, 0]}) | pandas.DataFrame |
from itertools import product as it_product
from typing import List, Dict
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr, wilcoxon
from provided_code.constants_class import ModelParameters
from provided_code.data_loader import DataLoader
from provided_code.dose_evaluation_class import EvaluateDose
from provided_code.general_functions import get_paths, get_predictions_to_optimize
def consolidate_data_for_analysis(cs: ModelParameters, force_new_consolidate: bool = False) \
-> [pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Consolidated data of all reference plans, dose predictions, and KBP plans. This may take about an hour to run, but
only needs to be run once for a given set of experiments.
Args:
cs: A constants object.
force_new_consolidate: Flag that will force consolidating data, which will overwrite previous data that was
consolidated in previous iterations.
Returns:
df_dose_error: Summary of dose error
df_dvh_metrics: Summary of DVH metric performance (can be converted to DVH error later)
df_clinical_criteria: Summary of clinical criteria performance
df_ref_dvh_metrics: Summary of reference dose DVH metrics
df_ref_clinical_criteria: Summary of reference dose clinical criteria performance
df_objective_data: The data from the objective functions (e.g., weights, objective function values)
df_solve_time: The time it took to solve models
"""
    # Re-run consolidate_data_for_analysis whenever new predictions or plans are added
consolidate_data_paths = {'dose': f'{cs.results_data_dir}/dose_error_df.csv',
'dvh': f'{cs.results_data_dir}/dvh_metric_df.csv',
'clinical_criteria': f'{cs.results_data_dir}/clinical_criteria_df.csv',
'ref_dvh': f'{cs.results_data_dir}/reference_metrics.csv',
'ref_clinical_criteria': f'{cs.results_data_dir}/reference_criteria.csv',
'weights': f'{cs.results_data_dir}/weights_df.csv',
'solve_time': f'{cs.results_data_dir}/solve_time_df.csv'
}
# Check if consolidated data already exists
    no_consolidated_data = False
for p in consolidate_data_paths.values():
if not os.path.isfile(p):
print(p)
            no_consolidated_data = True
os.makedirs(cs.results_data_dir, exist_ok=True) # Make dir for results
# Consolidate data if it doesn't exist yet or force flag is True
    if no_consolidated_data or force_new_consolidate:
# Prepare strings for data that will be evaluated
predictions_to_optimize, prediction_names = get_predictions_to_optimize(cs)
patient_names = os.listdir(cs.reference_data_dir)
hold_out_plan_paths = get_paths(cs.reference_data_dir, ext='') # list of paths used for held out testing
# Evaluate dose metrics
patient_data_loader = DataLoader(hold_out_plan_paths, mode_name='evaluation') # Set data loader
dose_evaluator_sample = EvaluateDose(patient_data_loader)
# Make reference dose DVH metrics and clinical criteria
dose_evaluator_sample.make_metrics()
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_dose_metric_df').to_csv(
consolidate_data_paths['ref_dvh'])
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_criteria_df').to_csv(
consolidate_data_paths['ref_clinical_criteria'])
# Initialize DataFrames for all scores and errors
optimizer_names = os.listdir(cs.plans_dir) # Get names of all optimizers
dose_error_index_dict, dvh_metric_index_dict = make_error_and_metric_indices(patient_names,
dose_evaluator_sample,
optimizer_names)
df_dose_error_indices = pd.MultiIndex.from_product(**dose_error_index_dict)
df_dvh_error_indices = pd.MultiIndex.from_arrays(**dvh_metric_index_dict)
# Make DataFrames
df_dose_error = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_solve_time = | pd.DataFrame(columns=prediction_names, index=df_dose_error_indices) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesApply(TestData, tm.TestCase):
def test_apply(self):
with np.errstate(all='ignore'):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assertIsInstance(result[0], list)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'timedelta64[ns]')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'object')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
class TestSeriesMap(TestData, tm.TestCase):
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_series_equal(result, self.ts * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
self.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
self.assert_series_equal(a.map(c), exp)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
self.assertEqual(left.dtype, np.float_)
self.assertTrue(issubclass(right.dtype.type, np.integer))
merged = left.map(right)
self.assertEqual(merged.dtype, np.float_)
self.assertTrue(isnull(merged['d']))
self.assertTrue(not isnull(merged['c']))
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
self.assertTrue(issubclass(s2.dtype.type, np.integer))
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
df = pd.DataFrame({'a': [(1, ), (2, ), (3, 4), (5, 6)]})
label_mappings = {(1, ): 'A', (2, ): 'B', (3, 4): 'A', (5, 6): 'B'}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'],
check_names=False)
def test_map_box(self):
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
# boxed value must be Timestamp instance
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [ | pd.Timedelta('1 days') | pandas.Timedelta |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
# set equal to the expected for test means group
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_most_frequent(self, missing_data2):
# Test most frequent with group by with 2 columns.
prep = GroupByImputer('most_frequent', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_mean(self, missing_data2):
# Test mean with group by with 2 columns.
prep = GroupByImputer('mean', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_median(self, missing_data2):
# Test median with group by with 2 columns.
prep = GroupByImputer('median', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
@pytest.mark.usefixtures("missing_data_numeric")
class TestMissingValueFiller(object):
def test_missing_factors(self, missing_data_factors):
# Test filling in missing factors with a string.
prep = MissingValueFiller('Missing')
result = prep.fit_transform(missing_data_factors)
exp_dict = {'c': ['a', 'Missing', 'a', 'b', 'b', 'Missing', 'c', 'a',
'a', 'c'],
'd': ['a', 'a', 'Missing', 'Missing', 'e', 'f', 'Missing',
'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_missing_numeric(self, missing_data_numeric):
# Test filling in missing numeric data with a number.
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data_numeric):
# Test unordered index is handled properly
new_index = list(missing_data_numeric.index)
shuffle(new_index)
missing_data_numeric.index = new_index
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestOverMissingThresholdDropper(object):
def test_drop_20(self, missing_data):
# Test dropping columns with missing over a threshold.
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(1.5)
svatd
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(-1)
svatd
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestValueReplacer(object):
def test_mapper(self, full_data_factors):
# Test replacing values with mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_inverse_mapper(self, full_data_factors):
# Test replacing values with inverse_mapper.
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
prep = ValueReplacer(inverse_mapper=inv_mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, full_data_factors):
        # Test throwing error when replacing values with a non-existent column.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
with pytest.raises(ValueError):
prep.fit(full_data_factors)
def test_2_mappers_value_error(self):
# Test throwing error when specifying mapper and inverse_mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
with pytest.raises(ValueError):
prep = ValueReplacer(mapper=mapper, inverse_mapper=inv_mapper)
prep
def test_no_mappers_value_error(self):
# Test throwing error when not specifying mapper or inverse_mapper.
with pytest.raises(ValueError):
prep = ValueReplacer()
prep
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
class TestFactorLimiter(object):
def test_limiter(self, missing_data_factors):
# Test limiting factor levels to specified levels with default.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, missing_data_factors):
        # Test throwing error when limiting values with a non-existent column.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'e': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
fl = FactorLimiter(factors)
with pytest.raises(ValueError):
fl.fit(missing_data_factors)
def test_unordered_index(self, missing_data_factors):
# Test unordered index is handled properly
new_index = list(missing_data_factors.index)
shuffle(new_index)
missing_data_factors.index = new_index
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestSingleValueAboveThresholdDropper(object):
def test_drop_70_with_na(self, missing_data):
# test dropping columns with over 70% single value, including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_70_without_na(self, missing_data):
# test dropping columns with over 70% single value, not including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=True)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(1.5)
prep
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(-1)
prep
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("single_values_data")
class TestSingleValueDropper(object):
def test_without_na(self, single_values_data):
# Test dropping columns with single values, excluding NaNs as a value.
prep = SingleValueDropper(dropna=True)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'e': [1, 2, None, None, None, None, None, None, None,
None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_with_na(self, single_values_data):
# Test dropping columns with single values, including NaNs as a value.
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, single_values_data):
# Test unordered index is handled properly
new_index = list(single_values_data.index)
shuffle(new_index)
single_values_data.index = new_index
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnExtractor(object):
def test_extraction(self, missing_data):
# Test extraction of columns from a DataFrame.
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_column_missing_error(self, missing_data):
        # Test throwing error when an extraction is requested of a missing column.
prep = ColumnExtractor(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnDropper(object):
def test_drop_multiple(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_single(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper('d')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_error(self, missing_data):
# Test throwing error when dropping is requested of a missing column
prep = ColumnDropper(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
@pytest.mark.usefixtures("full_data_factors_subset")
@pytest.mark.usefixtures("missing_data_factors")
class TestDummyCreator(object):
def test_default_dummies(self, full_data_factors):
# Test creating dummies variables from a DataFrame
prep = DummyCreator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_fit_transform(self, full_data_factors):
# Test creating dummies variables from a DataFrame
prep = DummyCreator()
result = prep.fit_transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_first_dummies(self, full_data_factors):
# Test dropping first dummies for each column.
kwargs = {'drop_first': True}
prep = DummyCreator(**kwargs)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_first_dummies_missing_levels(self, full_data_factors,
full_data_factors_subset):
# Test dropping first dummies for each column.
kwargs = {'drop_first': True}
prep = DummyCreator(**kwargs)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors_subset)
exp_dict = {'c_b': [1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 1, 1, 0, 0, 1],
'd_b': [0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 0, 0, 0, 0, 0],
'd_d': [1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_dummy_na_false_dummies(self, missing_data_factors):
# Test not creating dummies for NaNs.
prep = DummyCreator()
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_dummy_na_true_dummies(self, missing_data_factors):
# Test creating dummies for NaNs.
kwargs = {'dummy_na': True}
prep = DummyCreator(**kwargs)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'c_nan': [0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'd_nan': [0, 0, 1, 1, 0, 0, 1, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_fillin_missing_dummies(self, full_data_factors):
# Test filling missing dummies with a transform data missing levels
# present in the fitting data set.
prep = DummyCreator()
prep.fit(full_data_factors)
new_dict = {'c': ['b', 'c'],
'd': ['a', 'b']
}
new_data = pd.DataFrame(new_dict)
result = prep.transform(new_data)
exp_dict = {'c_a': [0, 0],
'c_b': [1, 0],
'c_c': [0, 1],
'd_a': [1, 0],
'd_b': [0, 1],
'd_c': [0, 0],
'd_d': [0, 0],
'd_e': [0, 0],
'd_f': [0, 0],
'd_g': [0, 0],
'd_h': [0, 0],
'd_j': [0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
prep = DummyCreator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestColumnValidator(object):
def test_order(self, full_data_factors):
# Test extraction of columns from a DataFrame
prep = ColumnValidator()
prep.fit(full_data_factors)
new_dict = {'d': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c']
}
new_data = pd.DataFrame(new_dict)
result = prep.transform(new_data)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_missing_columns_error(self, full_data_factors):
# Test throwing an error when the new data is missing columns
prep = ColumnValidator()
prep.fit(full_data_factors)
new_dict = {'d': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
new_data = pd.DataFrame(new_dict)
with pytest.raises(ValueError):
prep.transform(new_data)
def test_new_columns_error(self, full_data_factors):
# Test throwing an error when the new data is missing columns
prep = ColumnValidator()
prep.fit(full_data_factors)
new_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
'e': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
new_data = pd.DataFrame(new_dict)
with pytest.raises(ValueError):
prep.transform(new_data)
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
prep = ColumnValidator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("text_data")
class TestTextContainsDummyExtractor(object):
def test_mapper(self, text_data):
# Test text contains dummy with mapper.
mapper = {'a':
{'a_1':
[{'pattern': 'birthday', 'kwargs': {'case': False}},
{'pattern': 'bday', 'kwargs': {'case': False}}
],
'a_2':
[{'pattern': 'b.*day', 'kwargs': {'case': False}}
],
},
'b':
{'b_1':
[{'pattern': 'h.*r', 'kwargs': {'case': False}}
],
'b_2':
[{'pattern': '!', 'kwargs': {'case': False}},
]
}
}
prep = TextContainsDummyExtractor(mapper)
prep.fit(text_data)
result = prep.transform(text_data)
exp_dict = {'a': ['Happy Birthday!', 'It\'s your bday!'],
'b': ['Happy Arbor Day!', 'Happy Gilmore'],
'c': ['a', 'b'],
'a_1': [1, 1],
'a_2': [1, 1],
'b_1': [1, 1],
'b_2': [1, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_extra_column_value_error(self, text_data):
        # Test throwing error when replacing values with a non-existent column.
mapper = {'a':
{'a_1':
[{'pattern': 'birthday', 'kwargs': {'case': False}},
{'pattern': 'bday', 'kwargs': {'case': False}}
],
'a_2':
[{'pattern': 'b.*day', 'kwargs': {'case': False}}
],
},
'd':
{'b_1':
[{'pattern': 'h.*r', 'kwargs': {'case': False}}
],
'b_2':
[{'pattern': '!', 'kwargs': {'case': False}},
]
}
}
prep = TextContainsDummyExtractor(mapper)
with pytest.raises(ValueError):
prep.fit(text_data)
def test_unordered_index(self, text_data):
# Test unordered index is handled properly
new_index = list(text_data.index)
shuffle(new_index)
text_data.index = new_index
mapper = {'a':
{'a_1':
[{'pattern': 'birthday', 'kwargs': {'case': False}},
{'pattern': 'bday', 'kwargs': {'case': False}}
],
'a_2':
[{'pattern': 'b.*day', 'kwargs': {'case': False}}
],
},
'b':
{'b_1':
[{'pattern': 'h.*r', 'kwargs': {'case': False}}
],
'b_2':
[{'pattern': '!', 'kwargs': {'case': False}},
]
}
}
prep = TextContainsDummyExtractor(mapper)
prep.fit(text_data)
result = prep.transform(text_data)
exp_dict = {'a': ['Happy Birthday!', 'It\'s your bday!'],
'b': ['Happy Arbor Day!', 'Happy Gilmore'],
'c': ['a', 'b'],
'a_1': [1, 1],
'a_2': [1, 1],
'b_1': [1, 1],
'b_2': [1, 0]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
@pytest.mark.usefixtures("boolean_data")
@pytest.mark.usefixtures("text_data")
class TestBitwiseOperator(object):
def test_operator_value_error(self, text_data):
# Test throwing error when using invalid operator parameter
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
with pytest.raises(ValueError):
prep = BitwiseOperator('with', mapper)
prep
def test_or_mapper_boolean(self, boolean_data):
# Test bitwise or applied to booleans
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('or', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [True, True, False, False],
'b': [True, False, False, True],
'c': [False, True, True, False],
'd': [True, False, True, False],
'e': [False, True, False, True],
'f': [1, 1, 1, 1],
'g': [1, 1, 0, 1],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_or_mapper_binary(self, boolean_data):
# Test bitwise or applied to integers
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('or', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [1, 1, 0, 0],
'b': [1, 0, 0, 1],
'c': [0, 1, 1, 0],
'd': [1, 0, 1, 0],
'e': [0, 1, 0, 1],
'f': [1, 1, 1, 1],
'g': [1, 1, 0, 1],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_or_extra_column_value_error(self, text_data):
        # Test throwing error when replacing values with a non-existent column.
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('or', mapper)
with pytest.raises(ValueError):
prep.fit(text_data)
def test_and_mapper_boolean(self, boolean_data):
# Test bitwise and applied to booleans
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('and', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [True, True, False, False],
'b': [True, False, False, True],
'c': [False, True, True, False],
'd': [True, False, True, False],
'e': [False, True, False, True],
'f': [0, 0, 0, 0],
'g': [1, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_and_mapper_binary(self, boolean_data):
# Test bitwise and applied to integers
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('and', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [1, 1, 0, 0],
'b': [1, 0, 0, 1],
'c': [0, 1, 1, 0],
'd': [1, 0, 1, 0],
'e': [0, 1, 0, 1],
'f': [0, 0, 0, 0],
'g': [1, 0, 0, 0]
}
expected = | pd.DataFrame(exp_dict) | pandas.DataFrame |
#!/usr/bin/env python3
import pandas as pd
def main():
senders = {'tom': 1,
'dick': 5,
'harold': 2}
recipients = {'dick': 1,
'gerald': 3,
'tom': 2}
scores = {'tom': 1,
'dick': 1,
'harry': 1}
print('Pandas intelligently reorders from dicts')
df = pd.DataFrame({'senders': senders,
'recipients': recipients,
'scores': scores}).fillna(0)
print(df)
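    # Expected behaviour (illustrative, not captured output): the frame's index
    # is the sorted union of the inner dict keys
    # (dick, gerald, harold, harry, tom), and entries missing from a column
    # become 0.0 after fillna(0), e.g. df['recipients']['harold'] == 0.0.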
print('Pandas intelligently reorders from series')
ssend = pd.Series(senders, name='senders')
srecv = pd.Series(recipients, name='recipients')
sscor = pd.Series(scores, name='scores')
    df = pd.DataFrame({'senders': ssend, 'recipients': srecv, 'scores': sscor})
print(df)
    print('If you just pass a list of Series, they become records (rows) because of the from-records constructor:')
df = | pd.DataFrame([ssend, srecv, sscor]) | pandas.DataFrame |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
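        # Element-wise comparison of this Index with an array-like or scalar
        # `other`; boolean results are returned as a plain ndarray, while any
        # non-boolean result is re-wrapped as an Index below.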
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
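        # Element-wise arithmetic for Index: Series/DataFrame and
        # TimedeltaIndex operands defer to their own implementations,
        # datetime- and timedelta-like `other` values take the specialised
        # _evaluate_with_* paths, and everything else applies `op` to the
        # underlying ndarray of values.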
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
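# Note on the two factories above: they are not called at this point in the
# module. Classmethods defined further down (outside this excerpt), e.g.
# _add_comparison_methods and _add_numeric_methods, use them to attach the
# generated functions as __eq__, __lt__, __add__, etc. on Index subclasses.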
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
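# Illustrative round trip for the helper above (an assumption about typical
# usage, not code executed here): Index.__reduce__ returns
# (_new_Index, (type(idx), d), None), so
# pickle.loads(pickle.dumps(pd.Index([1, 2, 3]))) rebuilds the index through
# _new_Index.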
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
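    # Cached accessor exposing vectorised string methods on the index values,
    # e.g. pd.Index(['a', 'b']).str.upper() -> Index(['A', 'B'], dtype='object').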
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns a new Index with the same type as the caller.
Providing all metadata (such as name) is the caller's responsibility.
Using _shallow_copy is recommended because it fills in this metadata
when not otherwise specified.
- _shallow_copy: It returns a new Index with the same type (using
_simple_new), but fills in the caller's metadata when not otherwise
specified. Passed kwargs will overwrite the corresponding metadata.
- _shallow_copy_with_infer: It returns a new Index, inferring its type
from the passed values. Like _shallow_copy, it fills in the caller's
metadata when not otherwise specified.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
We require that we have a dtype compatible with the values;
if we are passed non-compatible values, coerce them using the constructor.
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
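Examples
--------
Illustrative sketch only (example values are made up; ``view`` shares the
underlying data, ``copy`` does not):
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = idx1.view()
>>> idx1.is_(idx2)
True
>>> idx1.is_(idx1.copy())
False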
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | --------------- | ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
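Examples
--------
Illustrative sketch only (example values are made up; with no ``other``,
missing entries are filled with the index's NA value):
>>> idx = pd.Index(['car', 'bike', 'train'])
>>> idx.where([True, False, True])
Index(['car', nan, 'train'], dtype='object')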
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype set those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
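Examples
--------
Illustrative sketch only (example values are made up):
>>> idx = pd.Index(['a', 'b', 'c'])
>>> new_idx = idx.copy()
>>> idx is new_idx
False
>>> idx.equals(new_idx)
True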
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
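Examples
--------
Illustrative sketch only (example values are made up):
>>> idx = pd.Index([1, 2, 3])
>>> s = idx.to_series()
>>> s.index.equals(idx)
True
>>> list(s.values)
[1, 2, 3]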
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
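Examples
--------
Illustrative sketch only (example values are made up; the exact repr may
vary between pandas versions):
>>> idx = pd.Index([1, 2, 3])
>>> idx.astype('float64')
Float64Index([1.0, 2.0, 3.0], dtype='float64')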
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
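Examples
--------
Illustrative sketch only (example values are made up):
>>> idx = pd.Index(['a', 'b', 'c'], name='old')
>>> idx.rename('new')
Index(['a', 'b', 'c'], dtype='object', name='new')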
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
Return True if the index is monotonic increasing, i.e. its values are
equal or increasing.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
Return True if the index is monotonic decreasing, i.e. its values are
equal or decreasing.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
"""
Extract duplicated index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different Index of types.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()
[2, 3]
>>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()
[2.0, 3.0]
>>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates()
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique()
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
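Examples
--------
Illustrative sketch only (example values are made up):
>>> idx = pd.Index([1, 2, 3])
>>> idx.contains(2)
True
>>> idx.contains(5)
False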
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
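Examples
--------
Illustrative sketch only (example values are made up; same-dtype inputs
keep their dtype):
>>> pd.Index([1, 2, 3]).append(pd.Index([4, 5]))
Int64Index([1, 2, 3, 4, 5], dtype='int64')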
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If the Index cannot hold NA, a ValueError is raised.
See also
--------
numpy.ndarray.take
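Examples
--------
Illustrative sketch only (example values are made up):
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.take([2, 0, 1])
Index(['c', 'a', 'b'], dtype='object')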
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach this condition; hasnans is checked beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else get mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
A boolean array of whether my values are NA
See Also
--------
pandas.Index.notna : boolean inverse of isna.
pandas.Index.dropna : omit entries with missing values.
pandas.isna : top-level isna.
Series.isna : detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True], dtype=bool)
"""
return self._isnan
isnull = isna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See also
--------
Index.notnull : alias of notna
Index.isna: inverse of notna
pandas.notna : top-level notna
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered a NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
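Examples
--------
Illustrative sketch only (example values are made up):
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.putmask([True, False, False, True], 0)
Int64Index([0, 2, 3, 0], dtype='int64')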
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.astype(object).putmask(mask, value)
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isna(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
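Examples
--------
Illustrative sketch only (example values are made up; dtype is ignored
when the element values compare equal):
>>> pd.Index([1, 2, 3]).equals(pd.Index([1.0, 2.0, 3.0]))
True
>>> pd.Index([1, 2, 3]).equals(pd.Index([1, 2, 4]))
False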
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(com._values_from_object(self),
com._values_from_object(other))
except Exception:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
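Examples
--------
Illustrative sketch only (example values are made up):
>>> pd.Index([1, 2, 3]).identical(pd.Index([1, 2, 3], name='x'))
False
>>> pd.Index([1, 2, 3], name='x').identical(pd.Index([1, 2, 3], name='x'))
True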
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
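Examples
--------
Illustrative sketch only (example values are made up):
>>> idx = pd.Index([10, 20, 30])
>>> idx.asof(25)
20
>>> idx.asof(5)
nan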
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
pandas.Series.sort_values : Sort values of a Series.
pandas.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
shifted index
See Also
--------
Series.shift : Shift values of Series.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self).__name__))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name, meaning
we take the not-None one, or None if the names differ.
Return a new object if we are resetting the name.
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects, sorting the result if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
# TODO: is_dtype_union_equal is a hack around
# 1. buggy set ops with duplicates (GH #13432)
# 2. CategoricalIndex lacking setops (GH #10186)
# Once those are fixed, this workaround can be removed
if not is_dtype_union_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self) or is_datetime64tz_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other) or is_datetime64tz_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
allow_fill=False)
result = _concat._concat_compat((lvals, other_diff))
try:
lvals[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = lvals
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the calling index.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except Exception:
# duplicates
indexer = algos.unique1d(
Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
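# Illustrative sketch (not in the upstream docstring): the result follows the
# order of the calling index, e.g. roughly
# >>> pd.Index([4, 3, 2, 1]).intersection(pd.Index([2, 3]))
# Int64Index([3, 2], dtype='int64')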
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._shallow_copy([])
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isna(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
_index_shared_docs['get_loc'] = """
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Tolerance may be a scalar
value, which applies the same tolerance to all values, or
list-like, which applies variable tolerance per element. List-like
includes list, tuple, array, Series, and must be the same size as
the index and its dtype must exactly match the index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
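# Illustrative sketch of the inexact-lookup options above (output shown is
# approximate):
# >>> idx = pd.Index([1, 3, 5])
# >>> idx.get_loc(4, method='pad')      # previous label, i.e. 3
# 1
# >>> idx.get_loc(4, method='nearest')  # tie between 3 and 5 -> larger wins
# 2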
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20825
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
if is_integer(key):
return s[key]
s = com._values_from_object(series)
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libindex.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(com._values_from_object(arr),
com._values_from_object(key), value)
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``self``, as there is only one level in the Index.
See also
---------
pandas.MultiIndex.get_level_values : get values for a level of a
MultiIndex
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
def droplevel(self, level=0):
"""
Return index with requested level(s) removed. If resulting index has
only 1 level left, the result will be of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
index : Index or MultiIndex
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
if len(level) == 0:
return self
if len(level) >= self.nlevels:
raise ValueError("Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".format(len(level), self.nlevels))
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
from .multi import MultiIndex
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
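# Illustrative sketch (the docstring above has no example; output is approximate):
# >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']], names=['x', 'y'])
# >>> mi.droplevel('x')
# Index(['a', 'b'], dtype='object', name='y')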
_index_shared_docs['get_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if target.is_boolean() and self.is_numeric():
return _ensure_platform_int(np.repeat(-1, target.size))
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._ndarray_values)
return _ensure_platform_int(indexer)
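# Illustrative sketch of get_indexer (output shown is approximate):
# >>> index = pd.Index([10, 20, 30])
# >>> index.get_indexer([20, 10, 40])        # exact matches only
# array([ 1,  0, -1])
# >>> index.get_indexer([25], method='pad')  # fall back to the previous label
# array([1])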
def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._ndarray_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._ndarray_values,
indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also maps not-found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
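# Worked illustration of the nearest/tolerance logic above: for
# self = [0, 10, 20] and target = [4, 16], the pad indexer is [0, 1] and the
# backfill indexer is [1, 2]; comparing distances picks 0 for 4 and 20 for 16,
# giving [0, 2]. With tolerance=3 both distances equal 4 and exceed it, so the
# result becomes [-1, -1].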
_index_shared_docs['get_indexer_non_unique'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
if is_categorical(target):
target = target.astype(target.dtype.categories.dtype)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._ndarray_values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return _ensure_platform_int(indexer), missing
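# Illustrative sketch (approximate output): with duplicate labels every
# matching position is returned, and unmatched targets are reported in
# ``missing`` by their position in the target.
# >>> idx = pd.Index(['a', 'b', 'a'])
# >>> indexer, missing = idx.get_indexer_non_unique(['a', 'c'])
# >>> indexer                      # roughly array([ 0,  2, -1])
# >>> missing                      # roughly array([1])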
def get_indexer_for(self, target, **kwargs):
"""
guaranteed return of an indexer even when non-unique
This dispatches to get_indexer or get_indexer_non_unique as appropriate
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
# that converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
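# Illustrative sketch (the docstring above has no example; output is approximate):
# >>> idx = pd.Index(['a', 'b', 'c'])
# >>> idx.groupby(np.array([1, 2, 1]))
# {1: Index(['a', 'c'], dtype='object'), 2: Index(['b'], dtype='object')}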
def map(self, mapper, na_action=None):
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from .multi import MultiIndex
new_values = super(Index, self)._map_values(
mapper, na_action=na_action)
attributes = self._get_attributes_dict()
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif attributes.get('name'):
names = [attributes.get('name')] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values,
names=names)
attributes['copy'] = False
if not new_values.size:
# empty
attributes['dtype'] = self.dtype
return Index(new_values, **attributes)
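# Illustrative sketch (approximate output):
# >>> idx = pd.Index([1, 2, 3])
# >>> idx.map({1: 'a', 2: 'b', 3: 'c'})
# Index(['a', 'b', 'c'], dtype='object')
# >>> idx.map(lambda x: (x, x ** 2))  # tuples promote the result to a MultiIndex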
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value is found in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
labels=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self, values)
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
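# Illustrative sketch: reindex returns the new index together with the
# positions of its labels in the original index (-1 where missing).
# >>> idx = pd.Index(['a', 'b', 'c'])
# >>> new_index, indexer = idx.reindex(['b', 'd'])
# new_index is Index(['b', 'd'], dtype='object'); indexer is roughly array([ 1, -1])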
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
_index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
sort : boolean, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
.. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
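# Illustrative sketch of an outer join of two monotonic indexes (output is
# approximate):
# >>> left = pd.Index([1, 2, 3])
# >>> right = pd.Index([2, 3, 4])
# >>> left.join(right, how='outer', return_indexers=True)
# (Int64Index([1, 2, 3, 4], dtype='int64'),
#  array([ 0,  1,  2, -1]), array([-1,  0,  1,  2]))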
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = com._not_none(*self.names)
other_names = com._not_none(*other.names)
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self._ndarray_values],
[other._ndarray_values],
how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self._ndarray_values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._ndarray_values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
returns sorter for the innermost level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_labels, ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._ndarray_values
ov = other._ndarray_values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered or unique index, compute the slice indexer for input
labels and step.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril
Examples
---------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (OverflowError, ValueError, TypeError):
pass
return key
def _validate_indexer(self, form, key, kind):
"""
if we are positional indexer
validate that we have appropriate typed bounds
must be an integer
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
_index_shared_docs['_maybe_cast_slice_bound'] = """
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
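# Worked illustration of the decreasing branch above: for self = [30, 20, 10]
# and label 20 with side='left', the reversed index [10, 20, 30] is searched
# with side='right', giving pos = 2, and len(self) - pos = 1 -- the leftmost
# position of 20 in the original (descending) order.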
def _get_loc_only_exact_matches(self, key):
"""
This is overridden on subclasses (namely, IntervalIndex) to control
get_slice_bound.
"""
return self.get_loc(key)
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self._get_loc_only_exact_matches(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
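# Illustrative sketch (approximate output):
# >>> idx = pd.Index(list('abbd'))
# >>> idx.get_slice_bound('b', side='left', kind='loc')
# 1
# >>> idx.get_slice_bound('b', side='right', kind='loc')
# 3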
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
Notes
-----
This method only works if the index is monotonic or unique.
Examples
---------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
See Also
--------
Index.get_loc : Get location for a single label
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
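# Illustrative sketch of the reversed-slice handling above:
# >>> idx = pd.Index(list('abcd'))
# >>> idx.slice_locs(start='c', end='b', step=-1)
# (2, 0)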
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._ndarray_values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If none of the labels are found in the selected axis
"""
arr_dtype = 'object' if self.dtype == 'object' else None
labels = com._index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise KeyError(
'labels %s not contained in axis' % labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
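# Illustrative sketch (approximate output):
# >>> idx = pd.Index(['a', 'b', 'c'])
# >>> idx.drop(['b'])
# Index(['a', 'c'], dtype='object')
# >>> idx.drop(['z'], errors='ignore')  # unknown labels are skipped
# Index(['a', 'b', 'c'], dtype='object')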
_index_shared_docs['index_unique'] = (
"""
Return unique values in the index. Uniques are returned in order
of appearance, this does NOT sort.
Parameters
----------
level : int or str, optional, default None
Only return values from specified level (for MultiIndex)
.. versionadded:: 0.23.0
Returns
-------
Index without duplicates
See Also
--------
unique
Series.unique
""")
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = super(Index, self).unique()
return self._shallow_copy(result)
def drop_duplicates(self, keep='first'):
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : equivalent method on Series
DataFrame.drop_duplicates : equivalent method on DataFrame
Index.duplicated : related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
return super(Index, self).drop_duplicates(keep=keep)
def duplicated(self, keep='first'):
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Determines which duplicates, if any, to mark as ``True``.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are marked as True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
Returns
-------
numpy.ndarray
See Also
--------
pandas.Series.duplicated : Equivalent method on pandas.Series
pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame
pandas.Index.drop_duplicates : Remove duplicate values from Index
"""
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care about metadata other than name,
# because the index can't have a freq if it contains NaNs
return Index(result, name=self.name)
return self._shallow_copy()
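# Illustrative sketch (approximate output):
# >>> idx = pd.Index([1.0, np.nan, 3.0])
# >>> idx.fillna(2.0)
# Float64Index([1.0, 2.0, 3.0], dtype='float64')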
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
"""
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
if how not in ('any', 'all'):
raise ValueError("invalid how option: {0}".format(how))
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
return self._shallow_copy()
def _evaluate_with_timedelta_like(self, other, op):
# Timedelta knows how to operate with np.array, so dispatch to that
# operation and then wrap the results
other = Timedelta(other)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op == divmod:
return Index(result[0], **attrs), Index(result[1], **attrs)
return Index(result, **attrs)
def _evaluate_with_datetime_like(self, other, op):
raise TypeError("can only perform ops with datetime like values")
def _evaluate_compare(self, other, op):
raise com.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
cls.__eq__ = _make_comparison_op(operator.eq, cls)
cls.__ne__ = _make_comparison_op(operator.ne, cls)
cls.__lt__ = _make_comparison_op(operator.lt, cls)
cls.__gt__ = _make_comparison_op(operator.gt, cls)
cls.__le__ = _make_comparison_op(operator.le, cls)
cls.__ge__ = _make_comparison_op(operator.ge, cls)
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
""" add in the numeric add/sub methods to disable """
cls.__add__ = make_invalid_op('__add__')
cls.__radd__ = make_invalid_op('__radd__')
cls.__iadd__ = make_invalid_op('__iadd__')
cls.__sub__ = make_invalid_op('__sub__')
cls.__rsub__ = make_invalid_op('__rsub__')
cls.__isub__ = make_invalid_op('__isub__')
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable other than add/sub """
cls.__pow__ = make_invalid_op('__pow__')
cls.__rpow__ = make_invalid_op('__rpow__')
cls.__mul__ = make_invalid_op('__mul__')
cls.__rmul__ = make_invalid_op('__rmul__')
cls.__floordiv__ = make_invalid_op('__floordiv__')
cls.__rfloordiv__ = make_invalid_op('__rfloordiv__')
cls.__truediv__ = make_invalid_op('__truediv__')
cls.__rtruediv__ = make_invalid_op('__rtruediv__')
if not compat.PY3:
cls.__div__ = make_invalid_op('__div__')
import numpy as np
import os
import pandas as pd
# import eia
from datetime import datetime
import pytz
import json
from os.path import join
import zipfile
import requests
import logging
from electricitylci.globals import data_dir, output_dir
from electricitylci.bulk_eia_data import download_EBA, row_to_df, ba_exchange_to_df
from electricitylci.model_config import model_specs
import electricitylci.eia923_generation as eia923
import electricitylci.eia860_facilities as eia860
from electricitylci.process_dictionary_writer import *
"""
Merge generation and emissions data. Add region designations using either
eGRID or EIA-860. Same for primary fuel by plant (eGRID or 923). Calculate
and merge in the total generation by region. Create the column "Subregion"
to hold regional name info. Remove electricity flows. Rename flows and add
UUIDs according to the federal flow list.
Parameters
----------
year : int
Specified year to pull transaction data between balancing authorities
subregion : str
Description of a group of regions. Options include 'FERC' for all FERC
market regions, 'BA' for all balancing authorities.
Returns
-------
Dictionary of dataframes with import region, export region, transaction amount, total
imports for import region, and fraction of total. The dictionary keys
are the level of aggregation: "BA", "FERC", "US".
Sample output:
ferc_final_trade.head()
import ferc region export ferc region value total fraction
0 CAISO CAISO 2.662827e+08 3.225829e+08 0.825471
1 CAISO Canada 1.119572e+06 3.225829e+08 0.003471
2 CAISO ERCOT 0.000000e+00 3.225829e+08 0.000000
3 CAISO ISO-NE 0.000000e+00 3.225829e+08 0.000000
4 CAISO MISO 0.000000e+00 3.225829e+08 0.000000
"""
def ba_io_trading_model(year=None, subregion=None, regions_to_keep=None):
REGION_NAMES = [
'California', 'Carolinas', 'Central',
'Electric Reliability Council of Texas, Inc.', 'Florida',
'Mid-Atlantic', 'Midwest', 'New England ISO',
'New York Independent System Operator', 'Northwest', 'Southeast',
'Southwest', 'Tennessee Valley Authority'
]
REGION_ACRONYMS = [
'TVA', 'MIDA', 'CAL', 'CAR', 'CENT', 'ERCO', 'FLA',
'MIDW', 'ISNE', 'NYIS', 'NW', 'SE', 'SW',
]
if year is None:
year = model_specs.NETL_IO_trading_year
if subregion is None:
subregion = model_specs.regional_aggregation
if subregion not in ['BA', 'FERC','US']:
raise ValueError(
f'subregion or regional_aggregation must have a value of "BA" or "FERC" '
f'when calculating trading with input-output, not {subregion}'
)
# Read in BAA file which contains the names and abbreviations
df_BA = pd.read_excel(data_dir + '/BA_Codes_930.xlsx', sheet_name = 'US', header = 4)
df_BA.rename(columns={'etag ID': 'BA_Acronym', 'Entity Name': 'BA_Name','NCR_ID#': 'NRC_ID', 'Region': 'Region'}, inplace=True)
BA = np.array(df_BA['BA_Acronym'])
US_BA_acronyms = df_BA['BA_Acronym'].tolist()
# Read in BAA file which contains the names and abbreviations
# Original df_BAA does not include the Canadian balancing authorities
# Import them here, then concatenate to make a single df_BAA_NA (North America)
df_BA_CA = pd.read_excel(data_dir + '/BA_Codes_930.xlsx', sheet_name = 'Canada', header = 4)
df_BA_CA.rename(columns={'etag ID': 'BA_Acronym', 'Entity Name': 'BA_Name','NCR_ID#': 'NRC_ID', 'Region': 'Region'}, inplace=True)
df_BA_NA = pd.concat([df_BA, df_BA_CA])
ferc_list = df_BA_NA['FERC_Region_Abbr'].unique().tolist()
# Read in the bulk data
# download_EBA()
path = join(data_dir, 'bulk_data', 'EBA.zip')
NET_GEN_ROWS = []
BA_TO_BA_ROWS = []
DEMAND_ROWS=[]
TOTAL_INTERCHANGE_ROWS=[]
try:
logging.info("Using existing bulk data download")
z = zipfile.ZipFile(path, 'r')
except FileNotFoundError:
logging.info("Downloading new bulk data")
download_EBA()
z = zipfile.ZipFile(path, 'r')
logging.info("Loading bulk data to json")
with z.open('EBA.txt') as f:
for line in f:
# All but one BA is currently reporting net generation in UTC and local time
# for that one BA (GRMA) only UTC time is reported - so only pulling that
# for now.
if b'EBA.NG.H' in line and b'EBA.NG.HL' not in line:
NET_GEN_ROWS.append(json.loads(line))
# Similarly there are 5 interchanges that report interchange in UTC but not in
# local time.
elif b'EBA.ID.H' in line and b'EBA.ID.HL' not in line:
exchange_line=json.loads(line)
if exchange_line['series_id'].split('-')[0][4:] not in REGION_ACRONYMS:
# try:
# Adding this check here to hopefully save some time down the road.
# dummy_date=datetime.strptime(exchange_line['data'][0][0],'%Y%m%dT%HZ')
BA_TO_BA_ROWS.append(exchange_line)
# good_date_count+=1
# except ValueError:
# bad_date_count+=1
# continue
# Keeping these here just in case
elif b'EBA.D.H' in line and b'EBA.D.HL' not in line:
DEMAND_ROWS.append(json.loads(line))
# elif b'EBA.TI.H' in line:
# TOTAL_INTERCHANGE_ROWS.append(json.loads(line))
logging.info(f"Net gen rows: {len(NET_GEN_ROWS)}; BA to BA rows:{len(BA_TO_BA_ROWS)}; Demand rows:{len(DEMAND_ROWS)}")
eia923_gen=eia923.build_generation_data(generation_years=[year])
eia860_df=eia860.eia860_balancing_authority(year)
eia860_df["Plant Id"]=eia860_df["Plant Id"].astype(int)
eia_combined_df=eia923_gen.merge(eia860_df,
left_on=["FacilityID"],
right_on=["Plant Id"],
how="left")
eia_gen_ba=eia_combined_df.groupby(by=["Balancing Authority Code"],as_index=False)["Electricity"].sum()
# Subset for specified eia_gen_year
start_datetime = '{}-01-01 00:00:00+00:00'.format(year)
end_datetime = '{}-12-31 23:00:00+00:00'.format(year)
start_datetime = datetime.strptime(start_datetime, '%Y-%m-%d %H:%M:%S%z')
end_datetime = datetime.strptime(end_datetime, '%Y-%m-%d %H:%M:%S%z')
# Net Generation Data Import
logging.info("Generating df with datetime")
df_net_gen = row_to_df(NET_GEN_ROWS, 'net_gen')
del(NET_GEN_ROWS)
logging.info("Pivoting")
df_net_gen = df_net_gen.pivot(index = 'datetime', columns = 'region', values = 'net_gen')
ba_cols = US_BA_acronyms
gen_cols = list(df_net_gen.columns.values)
gen_cols_set = set(gen_cols)
ba_ref_set = set(ba_cols)
col_diff = list(ba_ref_set - gen_cols_set)
col_diff.sort(key = str.upper)
logging.info("Cleaning net_gen dataframe")
# Add in missing columns, then sort in alphabetical order
for i in col_diff:
df_net_gen[i] = 0
# Keep only the columns that match the balancing authority names, there are several other columns included in the dataset
# that represent states (e.g., TEX, NY, FL) and other areas (US48)
df_net_gen = df_net_gen[ba_cols]
# Resort columns so the headers are in alpha order
df_net_gen = df_net_gen.sort_index(axis=1)
df_net_gen = df_net_gen.fillna(value = 0)
df_net_gen = df_net_gen.loc[start_datetime:end_datetime]
# Sum values in each column
df_net_gen_sum = df_net_gen.sum(axis = 0).to_frame()
logging.info("Reading canadian import data")
# Add Canadian import data to the net generation dataset, concatenate and put in alpha order
df_CA_Imports_Gen = pd.read_csv(data_dir + '/CA_Imports_Gen.csv', index_col = 0)
df_CA_Imports_Gen = df_CA_Imports_Gen[str(year)]
logging.info("Combining US and Canadian net gen data")
df_net_gen_sum = pd.concat([df_net_gen_sum,df_CA_Imports_Gen]).sum(axis=1)
df_net_gen_sum = df_net_gen_sum.to_frame()
df_net_gen_sum = df_net_gen_sum.sort_index(axis=0)
# Check the net generation of each Balancing Authority against EIA 923 data.
# If the relative difference for a given area is greater than the mean absolute
# deviation of the relative differences across all areas, it will be treated as an
# error and replaced with the value in EIA 923.
logging.info("Checking against EIA 923 generation data")
net_gen_check=df_net_gen_sum.merge(
right=eia_gen_ba,
left_index=True,
right_on=["Balancing Authority Code"],
how="left"
).reset_index()
net_gen_check["diff"]=abs(net_gen_check["Electricity"]-net_gen_check[0])/net_gen_check[0]
diff_mad=net_gen_check["diff"].mad()
net_gen_swap=net_gen_check.loc[net_gen_check["diff"]>diff_mad,["Balancing Authority Code","Electricity"]].set_index("Balancing Authority Code")
df_net_gen_sum.loc[net_gen_swap.index,[0]]=np.nan
net_gen_swap.rename(columns={"Electricity":0},inplace=True)
df_net_gen_sum=df_net_gen_sum.combine_first(net_gen_swap)
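# Illustrative example (hypothetical numbers): if the summed EIA-930 net generation for a
# BA is 1,250,000 MWh while EIA 923 reports 1,000,000 MWh, the relative difference is 0.2.
# If the mean absolute deviation of the relative differences across all BAs is 0.08, then
# 0.2 > 0.08 and the EIA-930 total for that BA is swapped for the EIA 923 figure above.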
# First work on the trading data from the 'df_trade_all_stack_2016' frame
# This cell does the following:
# 1. reformats the data to an annual basis
# 2. formats the BA names in the corresponding columns
# 3. evaluates the trade values from both BA perspectives
# (e.g. BA1 as exporter and importer in a transaction with BA2)
# 4. evaluates the trading data for any results that don't make sense
# a. both BAs designate as importers (negative value)
# b. both BAs designate as exporters (positive value)
# c. one of the BAs in the transaction reports a zero value and the other is nonzero
# 5. calculates the percent difference in the transaction values reported by the BAs
# 6. final exchange value based on logic:
# a. if the percent diff is less than 20%, take the mean,
# b. if not, use the value as reported by the exporting BAA
# c. designate each BA in the transaction either as the importer or exporter
# Output is a pivot with index (rows) representing exporting BAs,
# columns representing importing BAs, and values for the traded amount
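# Illustrative example of steps 5-6 (hypothetical values): suppose BA1-BA2 reports an
# annual exchange of +1000 MWh while BA2-BA1 reports -950 MWh. The absolute values differ
# by roughly 5%, which is below the 20% threshold, so the final exchange is the mean
# (975 MWh) and BA1 is designated the exporter because Exchange_1_2 > 0. Had the
# difference exceeded 20%, the value reported by the exporting BAA (+1000) would be used.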
# Group and resample trading data so that it is on an annual basis
logging.info("Creating trading dataframe")
df_ba_trade = ba_exchange_to_df(BA_TO_BA_ROWS, data_type='ba_to_ba')
del(BA_TO_BA_ROWS)
df_ba_trade = df_ba_trade.set_index('datetime')
df_ba_trade['transacting regions'] = df_ba_trade['from_region'] + '-' + df_ba_trade['to_region']
logging.info("Filtering trading dataframe")
# Keep only the columns that match the balancing authority names, there are several other columns included in the dataset
# that represent states (e.g., TEX, NY, FL) and other areas (US48)
filt1 = df_ba_trade['from_region'].isin(ba_cols)
filt2 = df_ba_trade['to_region'].isin(ba_cols)
filt = filt1 & filt2
df_ba_trade = df_ba_trade[filt]
# Subset for eia_gen_year, need to pivot first because of non-unique datetime index
df_ba_trade_pivot = df_ba_trade.pivot(columns = 'transacting regions', values = 'ba_to_ba')
df_ba_trade_pivot = df_ba_trade_pivot.loc[start_datetime:end_datetime]
# Sum columns - represents the net transactced amount between the two BAs
df_ba_trade_sum = df_ba_trade_pivot.sum(axis = 0).to_frame()
df_ba_trade_sum = df_ba_trade_sum.reset_index()
df_ba_trade_sum.columns = ['BAAs','Exchange']
# Split BAA string into exporting and importing BAA columns
df_ba_trade_sum[['BAA1', 'BAA2']] = df_ba_trade_sum['BAAs'].str.split('-', n=1, expand=True)
df_ba_trade_sum = df_ba_trade_sum.rename(columns={'BAAs': 'Transacting BAAs'})
# Create two perspectives - import and export to use for comparison in selection of the final exchange value between the BAAs
df_trade_sum_1_2 = df_ba_trade_sum.groupby(['BAA1', 'BAA2','Transacting BAAs'], as_index=False)[['Exchange']].sum()
df_trade_sum_2_1 = df_ba_trade_sum.groupby(['BAA2', 'BAA1', 'Transacting BAAs'], as_index=False)[['Exchange']].sum()
df_trade_sum_1_2.columns = ['BAA1_1_2', 'BAA2_1_2','Transacting BAAs_1_2', 'Exchange_1_2']
df_trade_sum_2_1.columns = ['BAA2_2_1', 'BAA1_2_1','Transacting BAAs_2_1', 'Exchange_2_1']
# Combine two grouped tables for comparison for exchange values
df_concat_trade = pd.concat([df_trade_sum_1_2,df_trade_sum_2_1], axis = 1)
df_concat_trade['Exchange_1_2_abs'] = df_concat_trade['Exchange_1_2'].abs()
df_concat_trade['Exchange_2_1_abs'] = df_concat_trade['Exchange_2_1'].abs()
# Create new column to check if BAAs designate as either both exporters or both importers
# or if one of the entities in the transaction reports a zero value
# Drop combinations where any of these conditions are true, keep everything else
df_concat_trade['Status_Check'] = np.where(((df_concat_trade['Exchange_1_2'] > 0) & (df_concat_trade['Exchange_2_1'] > 0)) \
|((df_concat_trade['Exchange_1_2'] < 0) & (df_concat_trade['Exchange_2_1'] < 0)) \
| ((df_concat_trade['Exchange_1_2'] == 0) | (df_concat_trade['Exchange_2_1'] == 0)), 'drop', 'keep')
# Calculate the difference in exchange values
df_concat_trade['Delta'] = df_concat_trade['Exchange_1_2_abs'] - df_concat_trade['Exchange_2_1_abs']
# Calculate percent diff of exchange_abs values - this can be done two ways:
# relative to 1_2 exchange or relative to 2_1 exchange - perform the calc both ways
# and take the average
df_concat_trade['Percent_Diff_Avg']= ((abs((df_concat_trade['Exchange_1_2_abs']/df_concat_trade['Exchange_2_1_abs'])-1)) \
+ (abs((df_concat_trade['Exchange_2_1_abs']/df_concat_trade['Exchange_1_2_abs'])-1)))/2
# Mean exchange value
df_concat_trade['Exchange_mean'] = df_concat_trade[['Exchange_1_2_abs', 'Exchange_2_1_abs']].mean(axis=1)
# The percent diff equation creates NaN where both values are 0, fill with 0
df_concat_trade['Percent_Diff_Avg'].fillna(0, inplace = True)
# Final exchange value based on logic; if the percent diff is less than 20%, take the mean,
# if not, use the value as reported by the exporting BAA. First figure out which BAA is the exporter
# by checking the value of Exchange_1_2.
# If that value is positive, it indicates that BAA1 exports to BAA2; if negative, use the
# value from Exchange_2_1
df_concat_trade['Final_Exchange'] = np.where((df_concat_trade['Percent_Diff_Avg'].abs() < 0.2),
df_concat_trade['Exchange_mean'],np.where((df_concat_trade['Exchange_1_2'] > 0),
df_concat_trade['Exchange_1_2'],df_concat_trade['Exchange_2_1']))
# Assign final designation of BAA as exporter or importer based on logical assignment
df_concat_trade['Export_BAA'] = np.where((df_concat_trade['Exchange_1_2'] > 0), df_concat_trade['BAA1_1_2'],
np.where((df_concat_trade['Exchange_1_2'] < 0), df_concat_trade['BAA2_1_2'],''))
df_concat_trade['Import_BAA'] = np.where((df_concat_trade['Exchange_1_2'] < 0), df_concat_trade['BAA1_1_2'],
np.where((df_concat_trade['Exchange_1_2'] > 0), df_concat_trade['BAA2_1_2'],''))
df_concat_trade = df_concat_trade[df_concat_trade['Status_Check'] == 'keep']
# Create the final trading matrix; first grab the necessary columns, rename the columns and then pivot
df_concat_trade_subset = df_concat_trade[['Export_BAA', 'Import_BAA', 'Final_Exchange']]
df_concat_trade_subset.columns = ['Exporting_BAA', 'Importing_BAA', 'Amount']
df_trade_pivot = df_concat_trade_subset.pivot_table(index = 'Exporting_BAA', columns = 'Importing_BAA', values = 'Amount').fillna(0)
# This cell continues formatting the df_trade
# Find missing BAs - need to add them in so that we have a square matrix
# Not all BAs are involved in transactions
trade_cols = list(df_trade_pivot.columns.values)
trade_rows = list(df_trade_pivot.index.values)
trade_cols_set = set(trade_cols)
trade_rows_set = set(trade_rows)
trade_ba_ref_set = set(ba_cols)
trade_col_diff = list(trade_ba_ref_set - trade_cols_set)
trade_col_diff.sort(key = str.upper)
trade_row_diff = list(trade_ba_ref_set - trade_rows_set)
trade_row_diff.sort(key=str.upper)
# Add in missing columns, then sort in alphabetical order
for i in trade_col_diff:
df_trade_pivot[i] = 0
df_trade_pivot = df_trade_pivot.sort_index(axis=1)
# Add in missing rows, then sort in alphabetical order
for i in trade_row_diff:
df_trade_pivot.loc[i,:] = 0
df_trade_pivot = df_trade_pivot.sort_index(axis=0)
# Add Canadian Imports to the trading matrix
# CA imports are specified in an external file
df_CA_Imports_Cols = pd.read_csv(data_dir + '/CA_Imports_Cols.csv', index_col = 0)
df_CA_Imports_Rows = pd.read_csv(data_dir + '/CA_Imports_Rows.csv', index_col = 0)
df_CA_Imports_Rows = df_CA_Imports_Rows[['us_ba', str(year)]]
df_CA_Imports_Rows = df_CA_Imports_Rows.pivot(columns = 'us_ba', values = str(year))
df_concat_trade_CA = | pd.concat([df_trade_pivot, df_CA_Imports_Rows]) | pandas.concat |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
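    # Illustrative usage sketch (the instance and file names are placeholders, not part of
    # this module):
    #   meta = TransformMetaData()
    #   names, nameTypes = meta.getHeaderFromFile(headerFilePath='time-series.csv', method=2)
    # The three methods differ only in whether pandas or the csv module parses the first
    # row (note that method 1 treats the first column as the index and drops it).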
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
            return (None, None, None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
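    # Rough sketch of the INI layout frameToINI produces (section, option names and values
    # below are placeholders; ConfigParser lower-cases option names by default):
    #
    #   [drive_stats]
    #   serial_number = ['SN001', 'SN002', 'SN003']
    #   temperature = ['35', '36', '35']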
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
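    # Small worked example of the weighted-average trick above (illustrative):
    # numpy.average([2.0, 4.0, 6.0], weights=[1/3, 1/3, 1/3]) == 4.0, i.e. the same result
    # as sum(x) / n, but obtained by multiplying each element by 1/n, which is the
    # "multiplication method" the docstring refers to.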
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)  # accumulator for the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
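    # Illustrative example (hypothetical numbers): for a column with mean 100 and sigma 5,
    # the default multiplierSigma of 3.0 gives sigmaRangeValue 15 and topValue 115, so the
    # cleaning step below keeps only rows whose values are at least 1 and below 115.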
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomaly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            # These timed out: drop rows at or above mean + multiplierSigma * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": | pandas.StringDtype() | pandas.StringDtype |
'''
Preprocessing Tranformers Based on sci-kit's API
By <NAME>
Created on June 12, 2017
'''
import copy
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from pymo.rotation_tools import Rotation
class MocapParameterizer(BaseEstimator, TransformerMixin):
def __init__(self, param_type = 'euler'):
'''
param_type = {'euler', 'quat', 'expmap', 'position'}
'''
self.param_type = param_type
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._to_expmap(X)
elif self.param_type == 'quat':
return X
elif self.param_type == 'position':
return self._to_pos(X)
elif self.param_type == 'axis_angle':
return self._to_axis_angle(X)
else:
            raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position, axis_angle' % self.param_type)
# return X
def inverse_transform(self, X, copy=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._expmap_to_euler(X)
elif self.param_type == 'quat':
raise UnsupportedParamError('quat2euler is not supported')
elif self.param_type == 'position':
print('positions 2 eulers is not supported')
return X
else:
raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position' % self.param_type)
def _to_pos(self, X):
'''Converts joints rotations in Euler angles to joint positions'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
            # Create a new DataFrame to store the joint positions
pos_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = [[0,0,0] for f in rc.iterrows()]
else:
euler_values = [[f[1]['%s_Xrotation'%joint],
f[1]['%s_Yrotation'%joint],
f[1]['%s_Zrotation'%joint]] for f in rc.iterrows()]
################# in euler angle, the order of rotation axis is very important #####################
rotation_order = rc.columns[0][rc.columns[0].find('rotation') - 1] + rc.columns[1][rc.columns[1].find('rotation') - 1] + rc.columns[2][rc.columns[2].find('rotation') - 1] #rotation_order is string : 'XYZ' or'ZYX' or ...
####################################################################################################
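                # Illustrative example: if the rotation columns for this joint are
                # ['Hips_Zrotation', 'Hips_Xrotation', 'Hips_Yrotation'], the character just
                # before 'rotation' in each name gives rotation_order == 'ZXY', which tells
                # Rotation in what order to compose the per-axis rotations.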
if pc.shape[1] < 3:
pos_values = [[0,0,0] for f in pc.iterrows()]
else:
pos_values =[[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()]
                #euler_values = [[0,0,0] for f in rc.iterrows()] # for debugging
                #pos_values = [[0,0,0] for f in pc.iterrows()] # for debugging
# Convert the eulers to rotation matrices
############################ input rotation order as Rotation class's argument #########################
rotmats = np.asarray([Rotation([f[0], f[1], f[2]], 'euler', rotation_order, from_deg=True).rotmat for f in euler_values])
########################################################################################################
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
if track.root_name == joint:
tree_data[joint][0] = rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = np.asarray([np.matmul(rotmats[i], tree_data[parent][0][i])
for i in range(len(tree_data[parent][0]))])
# add the position channel to the offset and store it in k, for every frame i
k = np.asarray([np.add(pos_values[i], track.skeleton[joint]['offsets'])
for i in range(len(tree_data[parent][0]))])
# multiply k to the rotmat of the parent for every frame i
q = np.asarray([np.matmul(k[i], tree_data[parent][0][i])
for i in range(len(tree_data[parent][0]))])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = np.asarray([np.add(q[i], tree_data[parent][1][i])
for i in range(len(tree_data[parent][1]))])
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _to_axis_angle(self, X):
'''Converts joints rotations in Euler angles to axis angle rotations'''
Q = []
for track in X:
# fix track names
# adapt joint name so that it's equal for either male or female
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the axis angle values
axis_anlge_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = [[0,0,0] for f in rc.iterrows()]
else:
euler_values = [[f[1]['%s_Xrotation'%joint],
f[1]['%s_Yrotation'%joint],
f[1]['%s_Zrotation'%joint]] for f in rc.iterrows()]
################# in euler angle, the order of rotation axis is very important #####################
rotation_order = rc.columns[0][rc.columns[0].find('rotation') - 1] + rc.columns[1][rc.columns[1].find('rotation') - 1] + rc.columns[2][rc.columns[2].find('rotation') - 1] #rotation_order is string : 'XYZ' or'ZYX' or ...
####################################################################################################
if pc.shape[1] < 3:
pos_values = [[0,0,0] for f in pc.iterrows()]
else:
pos_values =[[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()]
                #euler_values = [[0,0,0] for f in rc.iterrows()] # for debugging
                #pos_values = [[0,0,0] for f in pc.iterrows()] # for debugging
# Convert the eulers to axis angles
############################ input rotation order as Rotation class's argument #########################
axis_angles = np.asarray([Rotation([f[0], f[1], f[2]], 'euler', rotation_order, from_deg=True).get_euler_axis() for f in euler_values])
########################################################################################################
# Create the corresponding columns in the new DataFrame
axis_anlge_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in pos_values], index=axis_anlge_df.index)
axis_anlge_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in pos_values], index=axis_anlge_df.index)
axis_anlge_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in pos_values], index=axis_anlge_df.index)
axis_anlge_df['%s_Xrotation'%joint] = pd.Series(data=[e[0] for e in axis_angles], index=axis_anlge_df.index)
axis_anlge_df['%s_Yrotation'%joint] = pd.Series(data=[e[1] for e in axis_angles], index=axis_anlge_df.index)
axis_anlge_df['%s_Zrotation'%joint] = pd.Series(data=[e[2] for e in axis_angles], index=axis_anlge_df.index)
new_track = track.clone()
new_track.values = axis_anlge_df
Q.append(new_track)
return Q
def _to_expmap(self, X):
'''Converts Euler angles to Exponential Maps'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
exp_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)
exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)
exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
                euler = [[f[1]['%s_Xrotation'%joint], f[1]['%s_Yrotation'%joint], f[1]['%s_Zrotation'%joint]] for f in r.iterrows()] # Make sure the columns are organized in xyz order
exps = [Rotation(f, 'euler', from_deg=True).to_expmap() for f in euler] # Convert the eulers to exp maps
# Create the corresponding columns in the new DataFrame
exp_df['%s_alpha'%joint] = pd.Series(data=[e[0] for e in exps], index=exp_df.index)
exp_df['%s_beta'%joint] = pd.Series(data=[e[1] for e in exps], index=exp_df.index)
exp_df['%s_gamma'%joint] = pd.Series(data=[e[2] for e in exps], index=exp_df.index)
new_track = track.clone()
new_track.values = exp_df
Q.append(new_track)
return Q
def _expmap_to_euler(self, X):
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
            # Create a new DataFrame to store the euler angle rep
euler_df = pd.DataFrame(index=exp_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)
euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)
euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
                expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columns are organized in xyz order
                euler_rots = [Rotation(f, 'expmap').to_euler(True)[0] for f in expmap] # Convert the exp maps back to euler angles
# Create the corresponding columns in the new DataFrame
euler_df['%s_Xrotation'%joint] = | pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index) | pandas.Series |
import time
import glob
import h2o
import pandas as pd
from dil_preprocess import header_to_header
h2o_columns = list(header_to_header.values()) + ["Status-Code", "body"]
working_methods = [
## event_set
"1/event_set_smooth::audio", "2/event_set_smooth::audio",
"1/event_set_smooth::embed-img", "2/event_set_smooth::embed-img",
# embed: same as embed-img for firefox, unstable for chromium
"1/event_set_smooth::iframe-csp", # chromium always load
# iframe: (should) always load
"1/event_set_smooth::img", # chromiun same as embed-img
# link-prefetch: unstable
"1/event_set_smooth::link-stylesheet", "2/event_set_smooth::link-stylesheet",
"1/event_set_smooth::object", # chromium unstable
"1/event_set_smooth::script", "2/event_set_smooth::script",
"1/event_set_smooth::video", "2/event_set_smooth::video", # chromium same as audio?
## Global properties
# Download bar: unstable
"1/gp_securitypolicyviolation::iframe-csp", "2/gp_securitypolicyviolation::iframe-csp",
"1/gp_window_getComputedStyle::link-stylesheet", "2/gp_window_getComputedStyle::link-stylesheet",
"1/gp_window_hasOwnProperty::script", "2/gp_window_hasOwnProperty::script",
# Window onblur: ustable
"1/gp_window_onerror::script", "2/gp_window_onerror::script",
"1/gp_window_postMessage::embed-img", # chromium does not work
"2/gp_window_postMessage::embed", # firefox same as embed-img
"1/gp_window_postMessage::iframe", # chromium same as embed, iframe-csp == iframe
# object identical with embed for both (means different things)
"1/gp_window_postMessage::window.open", "2/gp_window_postMessage::window.open",
## Object properties
# buffered: same as duration (but less information)
# contentDocument: unstable?
"1/op_el_duration::audio", "2/op_el_duration::audio",
"1/op_el_duration::video", "2/op_el_duration::video", # chromium same as audio?
# height: use naturalHeight instead
"1/op_el_media_error::audio", "2/op_el_media_error::audio",
# mediaError-video: see audio?
"1/op_el_naturalHeight::img", "2/op_el_naturalHeight::img",
# naturalWidth: see height
# networkState: see duration
# paused: does not work
# readyState: see duration
# seekable: see duration
# sheet: does not work
"1/op_el_videoHeight::video", "2/op_el_videoHeight::video",
# videoWidth: see videoHeight
# width: see height
## window property
"1/op_frame_count::iframe", "2/op_frame_count::iframe", # same as iframe-csp
"1/op_frame_count::window.open", "2/op_frame_count::window.open",
"1/op_win_CSS2Properties::window.open", "2/op_win_CSS2Properties::window.open",
"1/op_win_history_length::window.open", "2/op_win_history_length::window.open",
"1/op_win_opener::window.open", "2/op_win_opener::window.open",
"1/op_win_origin::iframe", "2/op_win_origin::iframe", # same as iframe-csp
"1/op_win_origin::window.open", "2/op_win_origin::window.open",
"1/op_win_window::iframe", "2/op_win_window::iframe", # same as iframe-csp
"1/op_win_window::window.open", "2/op_win_window::window.open",
]
models = None
# Methods that can work even though the trees have the same result for both responses
single_methods = {
"gp_window_getComputedStyle": ["{'H1': 'rgb(0, 0, 255)'}"],
"gp_window_hasOwnProperty": ["{'a': 'Var a exist. Value: 5'}"],
"gp_window_onerror": ["[['Script error.', 0, 0]]"],
"gp_window_postMessage": ["['Message: mes1 Origin: https://172.17.0.1:44300', 'Message: mes1 Origin: https://172.17.0.1:44300']",
"['Message: mes1 Origin: https://172.17.0.1:44300']"],
"op_el_duration": [1, 2],
"op_el_naturalHeight": [50],
"op_el_videoHeight": [100],
"op_frame_count": [1, 2],
"gp_securitypolicyviolation": ["js-undefined"], # Special as we check for not negative instead
}
def init(methods="limited"):
"""Load the models from disk into the h2o cluster."""
global models
h2o.init(log_level="FATA")
h2o.no_progress() # Disable progress bars of h2o
if methods == "limited":
files = [f"../analysis/trees/tenmin/mojo/{method}.mojo" for method in working_methods if not "window.open" in method]
files_window = [f"../analysis/trees/window-redo/mojo/{method}.mojo" for method in working_methods if "window.open" in method]
#files_window = [f"../analysis/trees/tenmin/mojo/{method}.mojo" for method in working_methods if "window.open" in method]
files = files + files_window
elif methods == "all":
files = glob.glob("../analysis/trees/tenmin/mojo/1/*")
files = files + glob.glob("../analysis/trees/tenmin/mojo/2/*")
else:
print("Unsupported methods")
raise ValueError
files = [file for file in files if "conflicted" not in file]
print(f"h2o init complete: load {len(files)} mojos now.")
models = [h2o.import_mojo(file) for file in files]
print("h2o loading complete")
# Mojo import not working because no test metric exists.
# comment out `print(mojo_estimator)` in line 2253 in h2o.py fixes it
return models
def check_single_method(row_df, method):
"""For 'single_methods' check whether they could work or not."""
# Possible improvement: check according to method
# Currently only check if body hash is the same, for most methods
# However, not too important as we have dynamic confirmation
# This should not generate any FNs as a different body hash is required for almost all single methods
# But it might not be enough (e.g., two images of the same size have different hashes but result in the same observation)
if method == "gp_securitypolicyviolation":
if row_df["real_location"].nunique() == 1:
return None
else:
return row_df.iloc[0]
if row_df["resp_body_hash"].nunique() == 1:
return None
else:
return row_df.iloc[0]
def post_process_single(nunique_frame, res, method):
"""Post-process 'single_methods'."""
unique_pos_values = single_methods[method]
# special, check for not negative result (as many positive exist)
if method == "gp_securitypolicyviolation":
poss = nunique_frame["unique"].apply(lambda x: True if x != "js-undefined" else False)
# Only check the URLs where all observations have the "positive" result (e.g., image height 50)
else:
poss = nunique_frame["unique"].apply(lambda x: True if x in unique_pos_values else False)
poss = poss[poss == True]
poss = res.loc[res["URL"].isin(poss.index)].groupby(["URL"], group_keys=False).apply(check_single_method, method=method)
return poss
def predict_trees(af, log=False, conf=False):
""""Get the predictions for all fitted responses."""
start = time.time()
at = af.reset_index()
if log:
print(at.shape)
hf = h2o.H2OFrame(at[h2o_columns])
leaky_endpoints = {}
if log:
print(len(models))
# Predict for every working method/model/tree
for model in models:
model_name = model.actual_params["path"]
res = h2o.as_list(model.predict(hf))
# We might miss some cases for single methods,
# if we only continue if not all values are the same
# However this should be negligible: these are the results on all responses of a site!
# (e.g., not every response should be an img)
if log:
if "secret" in model_name:
with pd.option_context('display.max_rows', None):
print(model_name)
display(pd.concat([at, res], axis=1)[["URL", "predict"]])
if res["predict"].nunique() > 0:
res = res.rename(columns={"predict": f"predict_{model_name}"})
res = pd.concat([at, res[[f"predict_{model_name}"]]], axis=1)
# FPs possible, if both cookies/non-cookies have the same expanded rows
# And the result only differs based on our expansion
# res[[cookies, ind_i]] == res[[no-cookies, ind_i]] (for all i) + res[[cookies, ind_i]] != res[[cookies, ind_j]]
# Should only happen rarely and will be found by the dynamic confirmation, so it does not matter
info = res.groupby(["URL"])[f"predict_{model_name}"].agg(["nunique", "unique", "count"])
valid = info[info["nunique"] > 1]
for method in single_methods.keys():
if method in model_name:
# For the methods that do not necessarily need two records according to our tree,
# also check if they work if only one value was observed
new_valids = post_process_single(info[info["nunique"] == 1], res, method)
valid = pd.concat([valid, new_valids])
break
leaky = res.loc[res["URL"].isin(valid.index)]
if len(leaky) != 0:
leaky_endpoints[model_name] = leaky
if log:
print(f"{model_name} works for {len(valid)} URLs.")
# display(valid)
if log:
print(f"Took {time.time() - start} seconds")
return leaky_endpoints
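# Illustrative usage sketch (module and file names here are assumptions, not part of the code):
#
#   import pandas as pd
#   import dil_predict
#
#   dil_predict.init(methods="limited")            # load the mojo trees into the h2o cluster
#   af = pd.read_pickle("fitted_responses.pkl")    # frame containing the h2o_columns features
#   leaky = dil_predict.predict_trees(af)          # dict: model name -> rows of distinguishable URLs
#   leaky_table = dil_predict.reduce_leaky_endpoints(leaky)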
def reduce_leaky_endpoints(leaky_endpoints, log=False):
"""Convert leaky_endpoints dict of dfs to a single dataframe."""
leaky_table = None
for method in leaky_endpoints:
df = leaky_endpoints[method]
if log:
print(df.shape)
if leaky_table is None:
leaky_table = df
else:
# Update all rows that already exist
try:
leaky_table.loc[leaky_table["index_i"].isin(df["index_i"]),
f"predict_{method}"] = df[f"predict_{method}"]
except ValueError:
if log:
print("Error")
# Append all rows and then delete duplicates (only add new rows)
leaky_table = | pd.concat([leaky_table, df]) | pandas.concat |
# coding=utf-8
# Author: <NAME>
# Date: June 17, 2020
#
# Description: Calculates entropy based on network PCA
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from cycler import cycler
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
from utils import ensurePathExists
from scipy import stats
import argparse
def in_ranges(x, bins):
return [((x >= lower) & (x <= upper)) for lower, upper in bins]
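# Example (illustrative): in_ranges(15, [(0, 10), (10, 20), (20, 30)]) returns
# [False, True, False]; with overlapping bins such as [(0, 20), (10, 30)] the value 15
# yields [True, True], which is exactly what the overlapping angle windows below rely on.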
def compute_entropy(df_dec,
radius_window=1.0,
radius_overlap=0.1,
angle_window=30,
angle_overlap=15,
min_points=10,
n_cut_points=3,
components=9):
""" """
df_dec = df_dec.copy()
#
angle_items = int(angle_window / angle_overlap)
a = np.arange(-180, (181), angle_overlap)
angle_bins = [(i, j) for i, j in zip(a[0:-angle_items], a[angle_items:])]
n_bins = len(angle_bins)
max_entropy = stats.entropy((np.ones(shape=n_bins) / n_bins), base=2)
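    # max_entropy is the entropy of a uniform distribution over the angular bins, i.e.
    # log2(n_bins). With the default 30-degree windows stepped every 15 degrees there are
    # 23 bins, so max_entropy = log2(23) ~ 4.52 bits; it is used below to normalise the
    # per-window entropy into [0, 1].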
list_df_ent = []
#
for dim in range(1, (components + 1)):
print('Computing projection: {dim1:d} vs {dim2:d}'.format(dim1=dim, dim2=(dim + 1)))
#
cx = str(dim) + 'c'
cy = str(dim + 1) + 'c'
dist_label = '{cx:s}-{cy:s}-dist'.format(cx=cx, cy=cy)
angle_label = '{cx:s}-{cy:s}-angle'.format(cx=cx, cy=cy)
df_dec[dist_label] = np.hypot(df_dec[cx], df_dec[cy])
df_dec[angle_label] = np.degrees(np.arctan2(df_dec[cy], df_dec[cx]))
#
df_dec.sort_values(dist_label, ascending=True, inplace=True)
radius_max = df_dec[dist_label].max()
#
radius_items = int(radius_window / radius_overlap)
#
b = np.arange(0, (radius_max + radius_overlap), radius_overlap)
radius_intervals = [(s, e) for s, e in zip(b[0:-radius_items], b[radius_items:])]
# Loop radius intervals
r = []
for radius_start, radius_end in radius_intervals:
df_dian_tmp = df_dec.loc[(df_dec[dist_label] >= radius_start) & (df_dec[dist_label] <= radius_end), :]
dfc = df_dian_tmp[angle_label].apply(lambda x: pd.Series(in_ranges(x, angle_bins), angle_bins))
if len(dfc) > min_points:
dfp = (dfc.sum(axis=0) / dfc.sum().sum()).rename('prob').to_frame()
dfp['log2'] = dfp['prob'].apply(np.log2)
#
entropy = stats.entropy(dfp['prob'], base=2)
else:
entropy = np.nan
entropy_norm = entropy / max_entropy
r.append((dim, radius_start, radius_end, entropy, entropy_norm))
#
df_ent_tmp = pd.DataFrame(r, columns=['dim', 'radius-start', 'radius-end', 'entropy', 'entropy-norm'])
# Interpolation
df_ent_tmp['entropy-smooth'] = df_ent_tmp['entropy-norm'].interpolate(method='linear', limit_direction='both')
# Rank
df_ent_tmp['radius-rank'] = df_ent_tmp['radius-start'].rank(method='min')
df_ent_tmp['entropy-rank'] = df_ent_tmp['entropy-norm'].rank(method='min')
# Rank Sum
df_ent_tmp['rank-sum'] = ((df_ent_tmp['radius-rank']) + (df_ent_tmp['entropy-rank']))
        # Define cut points
cut_points = []
# Index % Sort
df_cp = df_ent_tmp.sort_values('rank-sum').loc[(df_ent_tmp['radius-start'] > 1.0), :]
possible_rank = 1
for possible_id, row in df_cp.iterrows():
possible_value = row['radius-start']
if not any([True if abs(possible_value - existing_value) <= 1.0 else False for existing_id, existing_value, existing_rank in cut_points]):
cut_points.append((possible_id, possible_value, possible_rank))
possible_rank += 1
if len(cut_points) >= n_cut_points:
break
#
dict_cut_points = {idx: rank for idx, value, rank in cut_points}
df_ent_tmp['cut-rank'] = df_ent_tmp.index.map(dict_cut_points)
#
# Add to list
list_df_ent.append(df_ent_tmp)
#
df_ent = | pd.concat(list_df_ent, axis='index') | pandas.concat |
from interface import *
from steps import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from copy import copy
class ADSAApp():
"""The class managing the interface for the project.
:param App app: The curses app wrapper where we will draw the interface.
"""
def __init__(self, app=None):
if app is None:
app = App()
self.app = app
height, width = self.app.stdscr.getmaxyx()
self.widgets = {}
# Main Menu
texts_main_menu = ["Choose a step:", "Step 1", "Step 2", "Step 3", "Step 4", "Exit"]
main_menu = Menu(self._get_coord_centered(height, width, texts_main_menu), texts_main_menu, True, True)
main_menu.bind(lambda x : self.main_menu_function(x))
self.widgets["main_menu"] = main_menu
# Step1 Menu
        texts_step1 = ["Which data structure do you want to use?", "AVL Tree", "Array", "Return"]
step1_menu = Menu(self._get_coord_centered(height, width, texts_step1), texts_step1, True, True)
step1_menu.bind(lambda x : self.step1_menu_function(x))
self.widgets["step1_menu"] = step1_menu
def main_menu_function(self, index):
self.app.stdscr.clear()
if index == 1:
self.widgets["step1_menu"].start(self.app)
elif index == 2:
self._find_impostors("data/adjacency_matrix.txt")
elif index == 3:
self._get_distance("data/graph_crewmates.txt", "data/graph_impostors.txt")
elif index == 4:
#self.display_step4()
self.step4()
elif index == 5:
return False
return True
def step1_menu_function(self, index):
self.app.stdscr.clear()
game = None
if index == 1:
self._play_game("AVLTree")
elif index == 2:
self._play_game("Array")
return False
def _play_game(self, datastructure):
height, width = self.app.stdscr.getmaxyx()
screen_game = FakeScreen([5, 5], [height - 10, width - 10])
game = Game(datastructure)
screen_game.insert_line(f"Game created with {datastructure} to store the players.")
for i in range(3):
screen_game.insert_line(f"Playing round {game.round}.")
screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
game.simulate_game()
game.sort_players()
screen_game.insert_line(f"END POOL !")
while game.get_nb_players() > 10:
screen_game.insert_line(f"Playing round {game.round}")
screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
game.simulate_game(True)
game.sort_players()
game.delete_last_player()
screen_game.insert_line(f"FINALS:")
for i in range(5):
screen_game.insert_line(f"Playing round {game.round}")
screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
game.simulate_game(True)
game.sort_players()
last_players = game.players.__str__().split('\n')
if datastructure == "AVLTree":
last_players = last_players[::-1]
for i in range(len(last_players)):
screen_game.insert_line(f"{i + 1}. {last_players[i]}")
screen_game.start(self.app)
def _find_impostors(self, filepath):
height, width = self.app.stdscr.getmaxyx()
screen_game = FakeScreen([5, 5], [height - 10, width - 10])
adjacency_matrix = np.genfromtxt(filepath, delimiter=",")
suspects = get_suspects(adjacency_matrix, [0])
screen_game.insert_line("Suspects:")
for key, val in suspects.items():
screen_game.insert_line(f" {key} is a suspect. He met {val} dead player.")
suspects_pair = get_suspects_pairs(suspects, adjacency_matrix, [0])
screen_game.insert_line("")
screen_game.insert_line("Suspects pair:")
for pair in suspects_pair:
screen_game.insert_line(f" {pair[0]} and {pair[1]}")
screen_game.insert_line("")
screen_game.insert_line("Press the escape key to continue...")
screen_game.start(self.app)
def _get_distance(self, filepath_crewmates, filepath_impostors, position=None):
height, width = self.app.stdscr.getmaxyx()
screen_game = FakeScreen([5, 5], [height - 10, width - 10])
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.expand_frame_repr', False)
names = ["Reactor", "UpperE", "LowerE", "Security", "Electrical",
"Medbay", "Storage", "Cafetaria", "Unnamed1", "Unnamed2",
"O2", "Weapons", "Shield", "Navigations"]
graph_crewmates = Graph(0)
graph_crewmates.import_from_file(filepath_crewmates)
distances = graph_crewmates.floydWarshall()
df_crewmates = pd.DataFrame(data=distances, index=names, columns=names)
lines = df_crewmates.__str__().split("\n")
screen_game.insert_line("CREWMATES")
for line in lines:
screen_game.insert_line(line)
names = ["Reactor", "UpperE", "LowerE", "Security", "Electrical",
"Medbay", "Storage", "Cafetaria", "Unnamed1", "Unnamed2",
"O2", "Weapons", "Shield", "Navigations", "CorridorW"]
graph_impostors = Graph(0)
graph_impostors.import_from_file(filepath_impostors)
distances = graph_impostors.floydWarshall()
df_impostors = | pd.DataFrame(data=distances, index=names, columns=names) | pandas.DataFrame |
"""Electric grid models module."""
import cvxpy as cp
import itertools
from multimethod import multimethod
import natsort
import numpy as np
import opendssdirect
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class ElectricGridModel(mesmo.utils.ObjectBase):
"""Electric grid model object.
Note:
This abstract class only defines the expected variables of linear electric grid model objects,
but does not implement any functionality.
Attributes:
timesteps (pd.Index): Index set of time steps of the current scenario. This is needed for optimization problem
definitions within linear electric grid models (see ``LinearElectricGridModel``).
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
"""
timesteps: pd.Index
phases: pd.Index
node_names: pd.Index
node_types: pd.Index
line_names: pd.Index
transformer_names: pd.Index
branch_names: pd.Index
branch_types: pd.Index
der_names: pd.Index
der_types: pd.Index
nodes: pd.Index
branches: pd.Index
lines: pd.Index
transformers: pd.Index
ders: pd.Index
node_voltage_vector_reference: np.ndarray
branch_power_vector_magnitude_reference: np.ndarray
der_power_vector_reference: np.ndarray
is_single_phase_equivalent: bool
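# Note: The multi-level index sets `nodes` / `branches` / `ders` above are typically queried via
# `mesmo.utils.get_index`, e.g. (illustrative sketch):
#     node_index_no_source = mesmo.utils.get_index(self.nodes, node_type='no_source')
# This returns the integer positions of the matching entries, which are used for slicing the
# admittance / incidence matrices in the subclasses.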
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData
):
# Process overhead line type definitions.
# - This is implemented as direct modification on the electric grid data object and therefore done first.
electric_grid_data = self.process_line_types_overhead(electric_grid_data)
# Obtain index set for time steps.
# - This is needed for optimization problem definitions within linear electric grid models.
self.timesteps = electric_grid_data.scenario_data.timesteps
# Obtain index sets for phases / node names / node types / line names / transformer names /
# branch types / DER names.
self.phases = (
pd.Index(
np.unique(np.concatenate(
electric_grid_data.electric_grid_nodes.apply(
mesmo.utils.get_element_phases_array,
axis=1
).values
))
)
)
self.node_names = pd.Index(electric_grid_data.electric_grid_nodes['node_name'])
self.node_types = pd.Index(['source', 'no_source'])
self.line_names = pd.Index(electric_grid_data.electric_grid_lines['line_name'])
self.transformer_names = pd.Index(electric_grid_data.electric_grid_transformers['transformer_name'])
self.branch_types = pd.Index(['line', 'transformer'])
self.der_names = pd.Index(electric_grid_data.electric_grid_ders['der_name'])
self.der_types = pd.Index(electric_grid_data.electric_grid_ders['der_type'].unique())
# Obtain nodes index set, i.e., collection of all phases of all nodes
# for generating indexing functions for the admittance matrix.
# - The admittance matrix has one entry for each phase of each node in both dimensions.
# - There cannot be "empty" dimensions for missing phases of nodes, because the matrix would become singular.
# - Therefore the admittance matrix must have the exact number of existing phases of all nodes.
node_dimension = (
int(electric_grid_data.electric_grid_nodes.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
self.nodes = (
pd.DataFrame(
None,
index=range(node_dimension),
columns=[
'node_type',
'node_name',
'phase'
]
)
)
# Fill `node_name`.
self.nodes['node_name'] = (
pd.concat([
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1,
'node_name'
],
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1,
'node_name'
],
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1,
'node_name'
]
], ignore_index=True)
)
# Fill `phase`.
self.nodes['phase'] = (
np.concatenate([
np.repeat(1, sum(electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1))
])
)
# Fill `node_type`.
self.nodes['node_type'] = 'no_source'
# Set `node_type` for source node.
self.nodes.loc[
self.nodes['node_name'] == (electric_grid_data.electric_grid['source_node_name']),
'node_type'
] = 'source'
# Sort by `node_name`.
self.nodes = (
self.nodes.reindex(index=natsort.order_by_index(
self.nodes.index,
natsort.index_natsorted(self.nodes.loc[:, 'node_name'])
))
)
self.nodes = pd.MultiIndex.from_frame(self.nodes)
# Obtain branches index set, i.e., collection of phases of all branches
# for generating indexing functions for the branch admittance matrices.
# - Branches consider all power delivery elements, i.e., lines as well as transformers.
# - The second dimension of the branch admittance matrices is the number of phases of all nodes.
# - Transformers must have same number of phases per winding and exactly two windings.
line_dimension = (
int(electric_grid_data.electric_grid_lines.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
transformer_dimension = (
int(electric_grid_data.electric_grid_transformers.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
self.branches = (
pd.DataFrame(
None,
index=range(line_dimension + transformer_dimension),
columns=[
'branch_type',
'branch_name',
'phase'
]
)
)
# Fill `branch_name`.
self.branches['branch_name'] = (
pd.concat([
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1,
'transformer_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1,
'transformer_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1,
'transformer_name'
]
], ignore_index=True)
)
# Fill `phase`.
self.branches['phase'] = (
np.concatenate([
np.repeat(1, sum(electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1)),
np.repeat(1, sum(electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1))
])
)
# Fill `branch_type`.
self.branches['branch_type'] = (
np.concatenate([
np.repeat('line', line_dimension),
np.repeat('transformer', transformer_dimension)
])
)
# Sort by `branch_type` / `branch_name`.
self.branches = (
self.branches.reindex(index=natsort.order_by_index(
self.branches.index,
natsort.index_natsorted(self.branches.loc[:, 'branch_name'])
))
)
self.branches = (
self.branches.reindex(index=natsort.order_by_index(
self.branches.index,
natsort.index_natsorted(self.branches.loc[:, 'branch_type'])
))
)
self.branches = pd.MultiIndex.from_frame(self.branches)
# Obtain index sets for lines / transformers corresponding to branches.
self.lines = (
self.branches[
mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='line')
]
)
self.transformers = (
self.branches[
mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='transformer')
]
)
# Obtain index set for DERs.
self.ders = pd.MultiIndex.from_frame(electric_grid_data.electric_grid_ders[['der_type', 'der_name']])
# Obtain reference / no load voltage vector.
self.node_voltage_vector_reference = np.zeros(len(self.nodes), dtype=complex)
voltage_phase_factors = (
np.array([
np.exp(0 * 1j), # Phase 1.
np.exp(- 2 * np.pi / 3 * 1j), # Phase 2.
np.exp(2 * np.pi / 3 * 1j) # Phase 3.
])
)
for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
# Obtain phases index & node index for positioning the node voltage in the voltage vector.
phases_index = mesmo.utils.get_element_phases_array(node) - 1
node_index = mesmo.utils.get_index(self.nodes, node_name=node_name)
# Insert voltage into voltage vector.
self.node_voltage_vector_reference[node_index] = (
voltage_phase_factors[phases_index]
* node.at['voltage'] / np.sqrt(3)
)
# Obtain reference / rated branch power vector.
self.branch_power_vector_magnitude_reference = np.zeros(len(self.branches), dtype=float)
for line_name, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain branch index.
branch_index = mesmo.utils.get_index(self.branches, branch_type='line', branch_name=line_name)
# Insert rated power into branch power vector.
self.branch_power_vector_magnitude_reference[branch_index] = (
line.at['maximum_current']
* electric_grid_data.electric_grid_nodes.at[line.at['node_1_name'], 'voltage']
/ np.sqrt(3)
)
for transformer_name, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Obtain branch index.
branch_index = mesmo.utils.get_index(self.branches, branch_type='transformer', branch_name=transformer_name)
# Insert rated power into branch flow vector.
self.branch_power_vector_magnitude_reference[branch_index] = (
transformer.at['apparent_power']
/ len(branch_index) # Divide total capacity by number of phases.
)
# Obtain reference / nominal DER power vector.
self.der_power_vector_reference = (
(
electric_grid_data.electric_grid_ders.loc[:, 'active_power_nominal']
+ 1.0j * electric_grid_data.electric_grid_ders.loc[:, 'reactive_power_nominal']
).values
)
# Obtain flag for single-phase-equivalent modelling.
if electric_grid_data.electric_grid.at['is_single_phase_equivalent'] == 1:
if len(self.phases) != 1:
raise ValueError(f"Cannot model electric grid with {len(self.phases)} phases as single-phase-equivalent.")
self.is_single_phase_equivalent = True
else:
self.is_single_phase_equivalent = False
# Make modifications for single-phase-equivalent modelling.
if self.is_single_phase_equivalent:
self.branch_power_vector_magnitude_reference[mesmo.utils.get_index(self.branches, branch_type='line')] *= 3
@staticmethod
def process_line_types_overhead(
electric_grid_data: mesmo.data_interface.ElectricGridData
) -> mesmo.data_interface.ElectricGridData:
"""Process overhead line type definitions in electric grid data object."""
# Process overhead line type definitions.
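# - Per line type, the procedure below is roughly: (1) assemble the primitive impedance matrix
#   from conductor geometry via the modified Carson's equations, (2) Kron-reduce the neutral
#   conductor, (3) assemble the potential coefficient matrix and invert it to obtain the
#   capacitance matrix, (4) append the resulting triangular matrix entries to
#   `electric_grid_line_types_matrices`.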
for line_type, line_type_data in electric_grid_data.electric_grid_line_types_overhead.iterrows():
# Obtain data shorthands.
# - Only for phases which have `conductor_id` defined in `electric_grid_line_types_overhead`.
phases = (
pd.Index([
1 if pd.notnull(line_type_data.at['phase_1_conductor_id']) else None,
2 if pd.notnull(line_type_data.at['phase_2_conductor_id']) else None,
3 if pd.notnull(line_type_data.at['phase_3_conductor_id']) else None,
'n' if pd.notnull(line_type_data.at['neutral_conductor_id']) else None
]).dropna()
)
phase_conductor_id = (
pd.Series({
1: line_type_data.at['phase_1_conductor_id'],
2: line_type_data.at['phase_2_conductor_id'],
3: line_type_data.at['phase_3_conductor_id'],
'n': line_type_data.at['neutral_conductor_id']
}).loc[phases]
)
phase_y = (
pd.Series({
1: line_type_data.at['phase_1_y'],
2: line_type_data.at['phase_2_y'],
3: line_type_data.at['phase_3_y'],
'n': line_type_data.at['neutral_y']
}).loc[phases]
)
phase_xy = (
pd.Series({
1: np.array([line_type_data.at['phase_1_x'], line_type_data.at['phase_1_y']]),
2: np.array([line_type_data.at['phase_2_x'], line_type_data.at['phase_2_y']]),
3: np.array([line_type_data.at['phase_3_x'], line_type_data.at['phase_3_y']]),
'n': np.array([line_type_data.at['neutral_x'], line_type_data.at['neutral_y']])
}).loc[phases]
)
phase_conductor_diameter = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_diameter'
]
for phase in phases
], index=phases)
* 1e-3 # mm to m.
)
phase_conductor_geometric_mean_radius = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_geometric_mean_radius'
]
for phase in phases
], index=phases)
* 1e-3 # mm to m.
)
phase_conductor_resistance = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_resistance'
]
for phase in phases
], index=phases)
)
phase_conductor_maximum_current = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_maximum_current'
]
for phase in phases
], index=phases)
)
# Obtain shorthands for neutral / non-neutral phases.
# - This is needed for Kron reduction.
phases_neutral = phases[phases.isin(['n'])]
phases_non_neutral = phases[~phases.isin(['n'])]
# Other parameter shorthands.
frequency = electric_grid_data.electric_grid.at['base_frequency'] # In Hz.
earth_resistivity = line_type_data.at['earth_resistivity'] # In Ωm.
air_permittivity = line_type_data.at['air_permittivity'] # In nF/km.
g_factor = 1e-4 # In Ω/km from 0.1609347e-3 Ω/mile from Kersting <https://doi.org/10.1201/9781315120782>.
# Obtain impedance matrix in Ω/km based on Kersting <https://doi.org/10.1201/9781315120782>.
z_matrix = pd.DataFrame(index=phases, columns=phases, dtype=complex)
for phase_row, phase_col in itertools.product(phases, phases):
# Calculate geometric parameters.
d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
s_angle = np.pi / 2 - np.arcsin((phase_y.at[phase_row] + phase_y.at[phase_col]) / s_distance)
# Calculate Kersting / Carson parameters.
k_factor = (
8.565e-4 * s_distance * np.sqrt(frequency / earth_resistivity)
)
p_factor = (
np.pi / 8
- (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(s_angle)
- k_factor ** 2 / 16 * np.cos(2 * s_angle) * (0.6728 + np.log(2 / k_factor))
)
q_factor = (
-0.0386
+ 0.5 * np.log(2 / k_factor)
+ (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(2 * s_angle)
)
x_factor = (
2 * np.pi * frequency * g_factor
* np.log(
phase_conductor_diameter[phase_row]
/ phase_conductor_geometric_mean_radius.at[phase_row]
)
)
# Calculate self / mutual impedance entries according to Kersting / Carson <https://doi.org/10.1201/9781315120782>.
if phase_row == phase_col:
z_matrix.at[phase_row, phase_col] = (
phase_conductor_resistance.at[phase_row]
+ 4 * np.pi * frequency * p_factor * g_factor
+ 1j * (
x_factor
+ 2 * np.pi * frequency * g_factor
* np.log(s_distance / phase_conductor_diameter[phase_row])
+ 4 * np.pi * frequency * q_factor * g_factor
)
)
else:
z_matrix.at[phase_row, phase_col] = (
4 * np.pi * frequency * p_factor * g_factor
+ 1j * (
2 * np.pi * frequency * g_factor
* np.log(s_distance / d_distance)
+ 4 * np.pi * frequency * q_factor * g_factor
)
)
# Apply Kron reduction.
z_matrix = (
pd.DataFrame(
(
z_matrix.loc[phases_non_neutral, phases_non_neutral].values
- z_matrix.loc[phases_non_neutral, phases_neutral].values
@ z_matrix.loc[phases_neutral, phases_neutral].values ** -1 # Inverse of scalar value.
@ z_matrix.loc[phases_neutral, phases_non_neutral].values
),
index=phases_non_neutral,
columns=phases_non_neutral
)
)
# Obtain potentials matrix in km/nF based on Kersting <https://doi.org/10.1201/9781315120782>.
p_matrix = pd.DataFrame(index=phases, columns=phases, dtype=float)
for phase_row, phase_col in itertools.product(phases, phases):
# Calculate geometric parameters.
d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
# Calculate potential according to Kersting <https://doi.org/10.1201/9781315120782>.
if phase_row == phase_col:
p_matrix.at[phase_row, phase_col] = (
1 / (2 * np.pi * air_permittivity)
* np.log(s_distance / phase_conductor_diameter.at[phase_row])
)
else:
p_matrix.at[phase_row, phase_col] = (
1 / (2 * np.pi * air_permittivity)
* np.log(s_distance / d_distance)
)
# Apply Kron reduction.
p_matrix = (
pd.DataFrame(
(
p_matrix.loc[phases_non_neutral, phases_non_neutral].values
- p_matrix.loc[phases_non_neutral, phases_neutral].values
@ p_matrix.loc[phases_neutral, phases_neutral].values ** -1 # Inverse of scalar value.
@ p_matrix.loc[phases_neutral, phases_non_neutral].values
),
index=phases_non_neutral,
columns=phases_non_neutral
)
)
# Obtain capacitance matrix in nF/km.
c_matrix = pd.DataFrame(np.linalg.inv(p_matrix), index=phases_non_neutral, columns=phases_non_neutral)
# Obtain final element matrices.
resistance_matrix = z_matrix.apply(np.real) # In Ω/km.
reactance_matrix = z_matrix.apply(np.imag) # In Ω/km.
capacitance_matrix = c_matrix # In nF/km.
# Add to line type matrices definition.
for phase_row in phases_non_neutral:
for phase_col in phases_non_neutral[phases_non_neutral <= phase_row]:
electric_grid_data.electric_grid_line_types_matrices = (
electric_grid_data.electric_grid_line_types_matrices.append(
pd.Series({
'line_type': line_type,
'row': phase_row,
'col': phase_col,
'resistance': resistance_matrix.at[phase_row, phase_col],
'reactance': reactance_matrix.at[phase_row, phase_col],
'capacitance': capacitance_matrix.at[phase_row, phase_col]
}),
ignore_index=True
)
)
# Obtain number of phases.
electric_grid_data.electric_grid_line_types.loc[line_type, 'n_phases'] = len(phases_non_neutral)
# Obtain maximum current.
# TODO: Validate this.
electric_grid_data.electric_grid_line_types.loc[line_type, 'maximum_current'] = (
phase_conductor_maximum_current.loc[phases_non_neutral].mean()
)
return electric_grid_data
class ElectricGridModelDefault(ElectricGridModel):
"""Electric grid model object consisting of the index sets for node names / branch names / der names / phases /
node types / branch types, the nodal admittance / transformation matrices, branch admittance /
incidence matrices and DER incidence matrices.
:syntax:
- ``ElectricGridModelDefault(electric_grid_data)``: Instantiate electric grid model for given
`electric_grid_data`.
- ``ElectricGridModelDefault(scenario_name)``: Instantiate electric grid model for given `scenario_name`.
The required `electric_grid_data` is obtained from the database.
Arguments:
electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.
scenario_name (str): MESMO scenario name.
Attributes:
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
node_admittance_matrix (sp.spmatrix): Nodal admittance matrix.
node_transformation_matrix (sp.spmatrix): Nodal transformation matrix.
branch_admittance_1_matrix (sp.spmatrix): Branch admittance matrix in the 'from' direction.
branch_admittance_2_matrix (sp.spmatrix): Branch admittance matrix in the 'to' direction.
branch_incidence_1_matrix (sp.spmatrix): Branch incidence matrix in the 'from' direction.
branch_incidence_2_matrix (sp.spmatrix): Branch incidence matrix in the 'to' direction.
der_incidence_wye_matrix (sp.spmatrix): Load incidence matrix for 'wye' DERs.
der_incidence_delta_matrix (sp.spmatrix): Load incidence matrix for 'delta' DERs.
node_admittance_matrix_no_source (sp.spmatrix): Nodal admittance matrix from no-source to no-source nodes.
node_admittance_matrix_source_to_no_source (sp.spmatrix): Nodal admittance matrix from source to no-source nodes.
node_transformation_matrix_no_source (sp.spmatrix): Nodal transformation matrix for the no-source nodes.
der_incidence_wye_matrix_no_source (sp.spmatrix): Incidence matrix from wye-conn. DERs to no-source nodes.
der_incidence_delta_matrix_no_source (sp.spmatrix): Incidence matrix from delta-conn. DERs to no-source nodes.
node_voltage_vector_reference_no_source (sp.spmatrix): Nodal reference voltage vector for no-source nodes.
node_voltage_vector_reference_source (sp.spmatrix): Nodal reference voltage vector for source nodes.
node_admittance_matrix_no_source_inverse (sp.spmatrix): Inverse of no-source nodal admittance matrix.
"""
node_admittance_matrix: sp.spmatrix
node_transformation_matrix: sp.spmatrix
branch_admittance_1_matrix: sp.spmatrix
branch_admittance_2_matrix: sp.spmatrix
branch_incidence_1_matrix: sp.spmatrix
branch_incidence_2_matrix: sp.spmatrix
der_incidence_wye_matrix: sp.spmatrix
der_incidence_delta_matrix: sp.spmatrix
node_admittance_matrix_no_source: sp.spmatrix
node_admittance_matrix_source_to_no_source: sp.spmatrix
node_transformation_matrix_no_source: sp.spmatrix
der_incidence_wye_matrix_no_source: sp.spmatrix
der_incidence_delta_matrix_no_source: sp.spmatrix
node_voltage_vector_reference_no_source: sp.spmatrix
node_voltage_vector_reference_source: sp.spmatrix
node_admittance_matrix_no_source_inverse: sp.spmatrix
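# Usage sketch (illustrative; `scenario_name` is assumed to refer to any scenario defined in the
# MESMO database):
#
#     electric_grid_model = ElectricGridModelDefault(scenario_name)
#     node_admittance_matrix = electric_grid_model.node_admittance_matrix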
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid data.
electric_grid_data = mesmo.data_interface.ElectricGridData(scenario_name)
# Instantiate electric grid model object.
self.__init__(
electric_grid_data
)
@multimethod
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData,
):
# Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
super().__init__(electric_grid_data)
# Define sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrix entries.
self.node_admittance_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=complex)
)
self.node_transformation_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=int)
)
self.branch_admittance_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_admittance_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_incidence_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.branch_incidence_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.der_incidence_wye_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
self.der_incidence_delta_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
# Add lines to admittance, transformation and incidence matrices.
for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(line)
# Obtain line resistance / reactance / capacitance matrix entries for the line.
matrices_index = (
electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type'] == line['line_type']
)
resistance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'resistance'].values
)
reactance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'reactance'].values
)
capacitance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'capacitance'].values
)
# Obtain the full line resistance and reactance matrices.
# Data only contains upper half entries.
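# - For a three-phase line, the six stored values are expected to correspond to the symmetric
#   matrix entries (1,1), (1,2), (2,2), (1,3), (2,3), (3,3); `matrices_full_index` below expands
#   them into the full phase matrix (truncated for lines with fewer phases).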
matrices_full_index = (
np.array([
[1, 2, 4],
[2, 3, 5],
[4, 5, 6]
]) - 1
)
matrices_full_index = (
matrices_full_index[:len(phases_vector), :len(phases_vector)]
)
resistance_matrix = resistance_matrix[matrices_full_index]
reactance_matrix = reactance_matrix[matrices_full_index]
capacitance_matrix = capacitance_matrix[matrices_full_index]
# Construct line series admittance matrix.
series_admittance_matrix = (
np.linalg.inv(
(resistance_matrix + 1j * reactance_matrix)
* line['length']
)
)
# Construct line shunt admittance.
# Note: Capacitance is given in nF/km (hence the 1e-9 factor to convert to F); the shunt susceptance is B = 2π * f * C, with half lumped at each line end (π-equivalent).
# TODO: Check line shunt admittance.
shunt_admittance_matrix = (
capacitance_matrix
* 2 * np.pi * electric_grid_data.electric_grid.at['base_frequency'] * 1e-9
* 0.5j
* line['length']
)
# Construct line element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
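# - The four blocks below form the standard two-port representation of the line π-equivalent:
#   [[Y_series + Y_shunt / 2, -Y_series], [-Y_series, Y_series + Y_shunt / 2]],
#   where `shunt_admittance_matrix` above already contains the half shunt admittance.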
admittance_matrix_11 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
admittance_matrix_12 = (
- series_admittance_matrix
)
admittance_matrix_21 = (
- series_admittance_matrix
)
admittance_matrix_22 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
# Obtain indexes for positioning the line element matrices
# in the full admittance matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='line',
branch_name=line['line_name']
)
)
# Add line element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Add transformers to admittance, transformation and incidence matrices.
# - Note: This setup only works for transformers with exactly two windings
# and identical number of phases at each winding / side.
# Define transformer factor matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
transformer_factors_1 = (
np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
)
transformer_factors_2 = (
1 / 3
* np.array([
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 2]
])
)
transformer_factors_3 = (
1 / np.sqrt(3)
* np.array([
[-1, 1, 0],
[0, -1, 1],
[1, 0, -1]
])
)
# Add transformers to admittance matrix.
for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Raise error if transformer nominal power is not valid.
if not (transformer.at['apparent_power'] > 0):
raise ValueError(
f"At transformer '{transformer.at['transformer_name']}', "
f"found invalid value for `apparent_power`: {transformer.at['apparent_power']}"
)
# Calculate transformer admittance.
admittance = (
(
(
2 * transformer.at['resistance_percentage'] / 100
+ 1j * transformer.at['reactance_percentage'] / 100
)
* (
electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage'] ** 2
/ transformer.at['apparent_power']
)
) ** -1
)
# Calculate turn ratio.
turn_ratio = (
(
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_1_name'], 'voltage']
)
/ (
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage']
)
)
# Construct transformer element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
if transformer.at['connection'] == "wye-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "delta-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "wye-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
elif transformer.at['connection'] == "delta-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
else:
raise ValueError(f"Unknown transformer type: {transformer.at['connection']}")
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(transformer)
# Obtain element admittance matrices for correct phases.
admittance_matrix_11 = (
admittance_matrix_11[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_12 = (
admittance_matrix_12[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_21 = (
admittance_matrix_21[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_22 = (
admittance_matrix_22[np.ix_(phases_vector - 1, phases_vector - 1)]
)
# Obtain indexes for positioning the transformer element
# matrices in the full matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='transformer',
branch_name=transformer['transformer_name']
)
)
# Add transformer element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Define transformation matrix according to:
# https://doi.org/10.1109/TPWRS.2018.2823277
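# - The transformation maps per-phase (wye) quantities to phase-to-phase (delta) quantities,
#   e.g. the first row yields the difference between phase 1 and phase 2.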
transformation_entries = (
np.array([
[1, -1, 0],
[0, 1, -1],
[-1, 0, 1]
])
)
for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
# Obtain node phases index.
phases_index = mesmo.utils.get_element_phases_array(node) - 1
# Construct node transformation matrix.
transformation_matrix = transformation_entries[np.ix_(phases_index, phases_index)]
# Obtain index for positioning node transformation matrix in full transformation matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=node['node_name']
)
)
# Add node transformation matrix to full transformation matrix.
self.node_transformation_matrix[np.ix_(node_index, node_index)] = transformation_matrix
# Add DERs to der incidence matrix.
for der_name, der in electric_grid_data.electric_grid_ders.iterrows():
# Obtain der connection type.
connection = der['connection']
# Obtain indexes for positioning the DER in the incidence matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=der['node_name'],
phase=mesmo.utils.get_element_phases_array(der)
)
)
der_index = (
mesmo.utils.get_index(
self.ders,
der_name=der['der_name']
)
)
if connection == "wye":
# Define incidence matrix entries.
# - Wye DERs are represented as balanced DERs across all
# their connected phases.
incidence_matrix = (
np.ones((len(node_index), 1), dtype=float)
/ len(node_index)
)
self.der_incidence_wye_matrix[np.ix_(node_index, der_index)] = incidence_matrix
elif connection == "delta":
# Obtain phases of the delta der.
phases_list = mesmo.utils.get_element_phases_array(der).tolist()
# Select connection node based on phase arrangement of delta der.
# TODO: Why no multi-phase delta DERs?
# - Delta DERs must be single-phase.
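# - The selected node appears to correspond to the leading phase of the delta connection,
#   i.e. phase 1 for the 1-2 arrangement, phase 2 for 2-3 and phase 3 for 1-3
#   (reading of the convention implemented below).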
if phases_list in ([1, 2], [2, 3]):
node_index = [node_index[0]]
elif phases_list == [1, 3]:
node_index = [node_index[1]]
else:
raise ValueError(f"Unknown delta phase arrangement: {phases_list}")
# Define incidence matrix entry.
# - Delta DERs are assumed to be single-phase.
incidence_matrix = np.array([1])
self.der_incidence_delta_matrix[np.ix_(node_index, der_index)] = incidence_matrix
else:
raise ValueError(f"Unknown der connection type: {connection}")
# Make modifications for single-phase-equivalent modelling.
if self.is_single_phase_equivalent:
self.der_incidence_wye_matrix /= 3
# Note that there won't be any delta loads in the single-phase-equivalent grid.
# Convert sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrices.
# - Converting from DOK to CSR format for more efficient calculations
# according to <https://docs.scipy.org/doc/scipy/reference/sparse.html>.
self.node_admittance_matrix = self.node_admittance_matrix.tocsr()
self.node_transformation_matrix = self.node_transformation_matrix.tocsr()
self.branch_admittance_1_matrix = self.branch_admittance_1_matrix.tocsr()
self.branch_admittance_2_matrix = self.branch_admittance_2_matrix.tocsr()
self.branch_incidence_1_matrix = self.branch_incidence_1_matrix.tocsr()
self.branch_incidence_2_matrix = self.branch_incidence_2_matrix.tocsr()
self.der_incidence_wye_matrix = self.der_incidence_wye_matrix.tocsr()
self.der_incidence_delta_matrix = self.der_incidence_delta_matrix.tocsr()
# Define shorthands for no-source variables.
# TODO: Add in class documentation.
# TODO: Replace local variables in power flow / linear models.
self.node_admittance_matrix_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.node_admittance_matrix_source_to_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='source')
)]
)
self.node_transformation_matrix_no_source = (
self.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.der_incidence_wye_matrix_no_source = (
self.der_incidence_wye_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.der_incidence_delta_matrix_no_source = (
self.der_incidence_delta_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.node_voltage_vector_reference_no_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='no_source')
]
)
self.node_voltage_vector_reference_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='source')
]
)
# Calculate inverse of no-source node admittance matrix.
# - Raise error if not invertible.
# - Only checking invertibility of no-source node admittance matrix, because full node admittance matrix may
# be non-invertible, e.g. zero entries when connecting a multi-phase line at three-phase source node.
try:
self.node_admittance_matrix_no_source_inverse = (
scipy.sparse.linalg.inv(self.node_admittance_matrix_no_source.tocsc())
)
assert not np.isnan(self.node_admittance_matrix_no_source_inverse.data).any()
except (RuntimeError, AssertionError) as exception:
raise (
ValueError(f"Node admittance matrix could not be inverted. Please check electric grid definition.")
) from exception
class ElectricGridModelOpenDSS(ElectricGridModel):
"""OpenDSS electric grid model object.
- Instantiate OpenDSS circuit by generating and running OpenDSS commands corresponding to given `electric_grid_data`,
utilizing the `OpenDSSDirect.py` package.
- The OpenDSS circuit can be accessed with the API of
`OpenDSSDirect.py`: http://dss-extensions.org/OpenDSSDirect.py/opendssdirect.html
- Due to dependency on `OpenDSSDirect.py`, creating multiple objects of this type may result in erroneous behavior.
:syntax:
- ``ElectricGridModelOpenDSS(electric_grid_data)``: Initialize OpenDSS circuit model for given
`electric_grid_data`.
- ``ElectricGridModelOpenDSS(scenario_name)`` Initialize OpenDSS circuit model for given `scenario_name`.
The required `electric_grid_data` is obtained from the database.
Arguments:
scenario_name (str): MESMO scenario name.
electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.
Attributes:
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
circuit_name (str): Circuit name, stored for validation that the correct OpenDSS model is being accessed.
electric_grid_data: (mesmo.data_interface.ElectricGridData): Electric grid data object, stored for
possible reinitialization of the OpenDSS model.
"""
circuit_name: str
electric_grid_data: mesmo.data_interface.ElectricGridData
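# Usage sketch (illustrative; `scenario_name` is assumed to refer to any scenario defined in the
# MESMO database):
#
#     electric_grid_model_opendss = ElectricGridModelOpenDSS(scenario_name)
#     # The OpenDSS circuit is then loaded and can be queried via the OpenDSSDirect.py API, e.g.:
#     # bus_names = opendssdirect.Circuit.AllBusNames()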
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid data.
electric_grid_data = (
mesmo.data_interface.ElectricGridData(scenario_name)
)
self.__init__(
electric_grid_data
)
@multimethod
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData
):
# TODO: Add reset method to ensure correct circuit model is set in OpenDSS when handling multiple models.
# Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
super().__init__(electric_grid_data)
# Obtain circuit name.
self.circuit_name = electric_grid_data.electric_grid.at['electric_grid_name']
# Store electric grid data.
self.electric_grid_data = electric_grid_data
# Clear OpenDSS.
opendss_command_string = "clear"
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Obtain source voltage.
source_voltage = (
electric_grid_data.electric_grid_nodes.at[
electric_grid_data.electric_grid.at['source_node_name'],
'voltage'
]
)
# Adjust source voltage for single-phase, non-single-phase-equivalent modelling.
if (len(self.phases) == 1) and not self.is_single_phase_equivalent:
source_voltage /= np.sqrt(3)
# Add circuit info to OpenDSS command string.
opendss_command_string = (
f"set defaultbasefrequency={electric_grid_data.electric_grid.at['base_frequency']}"
+ f"\nnew circuit.{self.circuit_name}"
+ f" phases={len(self.phases)}"
+ f" bus1={electric_grid_data.electric_grid.at['source_node_name']}"
+ f" basekv={source_voltage / 1000}"
+ f" mvasc3=9999999999 9999999999" # Set near-infinite power limit for source node.
)
# Create circuit in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define line codes.
for line_type_index, line_type in electric_grid_data.electric_grid_line_types.iterrows():
# Obtain line resistance and reactance matrix entries for the line.
matrices = (
electric_grid_data.electric_grid_line_types_matrices.loc[
(
electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type']
== line_type.at['line_type']
),
['resistance', 'reactance', 'capacitance']
]
)
# Obtain number of phases.
# - Only define line types for as many phases as needed for the current grid.
n_phases = min(line_type.at['n_phases'], len(self.phases))
# Add line type name and number of phases to OpenDSS command string.
opendss_command_string = (
f"new linecode.{line_type.at['line_type']}"
+ f" nphases={n_phases}"
)
# Add resistance and reactance matrix entries to OpenDSS command string,
# with formatting depending on number of phases.
if n_phases == 1:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'resistance'])
+ " xmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'reactance'])
+ " cmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
elif n_phases == 2:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance'])
+ " xmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
+ " cmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
elif n_phases == 3:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance'])
+ f" xmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
+ f" cmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
# Create line code in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define lines.
for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain number of phases for the line.
n_phases = len(mesmo.utils.get_element_phases_array(line))
# Add line name, phases, node connections, line type and length
# to OpenDSS command string.
opendss_command_string = (
f"new line.{line['line_name']}"
+ f" phases={n_phases}"
+ f" bus1={line['node_1_name']}{mesmo.utils.get_element_phases_string(line)}"
+ f" bus2={line['node_2_name']}{mesmo.utils.get_element_phases_string(line)}"
+ f" linecode={line['line_type']}"
+ f" length={line['length']}"
)
# Create line in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define transformers.
for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Obtain number of phases.
n_phases = len(mesmo.utils.get_element_phases_array(transformer))
# Add transformer name, number of phases / windings and reactances to OpenDSS command string.
opendss_command_string = (
f"new transformer.{transformer.at['transformer_name']}"
+ f" phases={n_phases}"
+ f" windings=2"
+ f" xscarray=[{transformer.at['reactance_percentage']}]"
)
# Add windings to OpenDSS command string.
windings = [1, 2]
for winding in windings:
# Obtain nominal voltage level for each winding.
voltage = electric_grid_data.electric_grid_nodes.at[transformer.at[f'node_{winding}_name'], 'voltage']
# Obtain node phases connection string for each winding.
connection = transformer.at['connection'].split('-')[winding - 1]
if connection == "wye":
node_phases_string = (
mesmo.utils.get_element_phases_string(transformer)
+ ".0" # Enforce wye-grounded connection.
)
elif connection == "delta":
node_phases_string = (
mesmo.utils.get_element_phases_string(transformer)
)
else:
raise ValueError(f"Unknown transformer connection type: {connection}")
# Add node connection, nominal voltage / power, resistance and maximum / minimum tap level
# to OpenDSS command string for each winding.
opendss_command_string += (
f" wdg={winding}"
+ f" bus={transformer.at[f'node_{winding}_name']}" + node_phases_string
+ f" conn={connection}"
+ f" kv={voltage / 1000}"
+ f" kva={transformer.at['apparent_power'] / 1000}"
+ f" %r={transformer.at['resistance_percentage']}"
+ f" maxtap="
+ f"{transformer.at['tap_maximum_voltage_per_unit']}"
+ f" mintap="
+ f"{transformer.at['tap_minimum_voltage_per_unit']}"
)
# Create transformer in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define DERs.
# TODO: At the moment, all DERs are modelled as loads in OpenDSS.
for der_index, der in electric_grid_data.electric_grid_ders.iterrows():
# Obtain number of phases for the DER.
n_phases = len(mesmo.utils.get_element_phases_array(der))
# Obtain nominal voltage level for the DER.
voltage = electric_grid_data.electric_grid_nodes.at[der['node_name'], 'voltage']
# Convert to line-to-neutral voltage for single-phase DERs, according to:
# https://sourceforge.net/p/electricdss/discussion/861976/thread/9c9e0efb/
# - Not needed for single-phase-equivalent modelling.
if (n_phases == 1) and not self.is_single_phase_equivalent:
voltage /= np.sqrt(3)
# Add explicit ground-phase connection for single-phase, wye DERs, according to:
# https://sourceforge.net/p/electricdss/discussion/861976/thread/d420e8fb/
# - This does not seem to make a difference if omitted, but is kept here to follow the recommendation.
# - Not needed for single-phase-equivalent modelling.
if (n_phases == 1) and (der['connection'] == 'wye') and not self.is_single_phase_equivalent:
ground_phase_string = ".0"
else:
ground_phase_string = ""
# Add node connection, model type, voltage, nominal power to OpenDSS command string.
opendss_command_string = (
f"new load.{der['der_name']}"
+ f" bus1={der['node_name']}{ground_phase_string}{mesmo.utils.get_element_phases_string(der)}"
+ f" phases={n_phases}"
+ f" conn={der['connection']}"
# All loads are modelled as constant P/Q according to:
# OpenDSS Manual April 2018, page 150, "Model"
+ f" model=1"
+ f" kv={voltage / 1000}"
+ f" kw={- der['active_power_nominal'] / 1000}"
+ f" kvar={- der['reactive_power_nominal'] / 1000}"
# Set low V_min to avoid switching to impedance model according to:
# OpenDSS Manual April 2018, page 150, "Vminpu"
+ f" vminpu=0.6"
# Set high V_max to avoid switching to impedance model according to:
# OpenDSS Manual April 2018, page 150, "Vmaxpu"
+ f" vmaxpu=1.4"
)
# Create DER in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Obtain voltage bases.
voltage_bases = (
np.unique(
electric_grid_data.electric_grid_nodes.loc[:, 'voltage'].values / 1000
).tolist()
)
# Set control mode and voltage bases.
opendss_command_string = (
f"set voltagebases={voltage_bases}"
+ f"\nset controlmode=off"
+ f"\ncalcvoltagebases"
)
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Set solution mode to "single snapshot power flow" according to:
# OpenDSSComDoc, November 2016, page 1
opendss_command_string = "set mode=0"
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
class ElectricGridDEROperationResults(mesmo.utils.ResultsBase):
der_active_power_vector: pd.DataFrame
der_active_power_vector_per_unit: pd.DataFrame
der_reactive_power_vector: pd.DataFrame
der_reactive_power_vector_per_unit: pd.DataFrame
class ElectricGridOperationResults(ElectricGridDEROperationResults):
electric_grid_model: ElectricGridModel
node_voltage_magnitude_vector: pd.DataFrame
node_voltage_magnitude_vector_per_unit: pd.DataFrame
node_voltage_angle_vector: pd.DataFrame
branch_power_magnitude_vector_1: pd.DataFrame
branch_power_magnitude_vector_1_per_unit: pd.DataFrame
branch_active_power_vector_1: pd.DataFrame
branch_active_power_vector_1_per_unit: pd.DataFrame
branch_reactive_power_vector_1: pd.DataFrame
branch_reactive_power_vector_1_per_unit: pd.DataFrame
branch_power_magnitude_vector_2: pd.DataFrame
branch_power_magnitude_vector_2_per_unit: pd.DataFrame
branch_active_power_vector_2: pd.DataFrame
branch_active_power_vector_2_per_unit: pd.DataFrame
branch_reactive_power_vector_2: pd.DataFrame
branch_reactive_power_vector_2_per_unit: pd.DataFrame
loss_active: pd.DataFrame
loss_reactive: pd.DataFrame
class ElectricGridDLMPResults(mesmo.utils.ResultsBase):
electric_grid_energy_dlmp_node_active_power: pd.DataFrame
electric_grid_voltage_dlmp_node_active_power: pd.DataFrame
electric_grid_congestion_dlmp_node_active_power: pd.DataFrame
electric_grid_loss_dlmp_node_active_power: pd.DataFrame
electric_grid_total_dlmp_node_active_power: pd.DataFrame
electric_grid_voltage_dlmp_node_reactive_power: pd.DataFrame
electric_grid_congestion_dlmp_node_reactive_power: pd.DataFrame
electric_grid_loss_dlmp_node_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_node_reactive_power: pd.DataFrame
electric_grid_total_dlmp_node_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_der_active_power: pd.DataFrame
electric_grid_voltage_dlmp_der_active_power: pd.DataFrame
electric_grid_congestion_dlmp_der_active_power: pd.DataFrame
electric_grid_loss_dlmp_der_active_power: pd.DataFrame
electric_grid_total_dlmp_der_active_power: pd.DataFrame
electric_grid_voltage_dlmp_der_reactive_power: pd.DataFrame
electric_grid_congestion_dlmp_der_reactive_power: pd.DataFrame
electric_grid_loss_dlmp_der_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_der_reactive_power: pd.DataFrame
electric_grid_total_dlmp_der_reactive_power: pd.DataFrame
electric_grid_total_dlmp_price_timeseries: pd.DataFrame
class PowerFlowSolution(mesmo.utils.ObjectBase):
"""Power flow solution object consisting of DER power vector and the corresponding solution for
nodal voltage vector / branch power vector and total loss (all complex valued).
"""
der_power_vector: np.ndarray
node_voltage_vector: np.ndarray
branch_power_vector_1: np.ndarray
branch_power_vector_2: np.ndarray
loss: complex
class PowerFlowSolutionFixedPoint(PowerFlowSolution):
"""Fixed point power flow solution object."""
@multimethod
def __init__(
self,
scenario_name: str,
**kwargs
):
# Obtain `electric_grid_model`.
electric_grid_model = ElectricGridModelDefault(scenario_name)
self.__init__(
electric_grid_model,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
**kwargs
):
# Obtain `der_power_vector`, assuming nominal power conditions.
der_power_vector = electric_grid_model.der_power_vector_reference
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
**kwargs
):
# Store DER power vector.
self.der_power_vector = der_power_vector.ravel()
# Obtain voltage solution.
self.node_voltage_vector = (
self.get_voltage(
electric_grid_model,
self.der_power_vector,
**kwargs
)
)
# Obtain branch flow solution.
(
self.branch_power_vector_1,
self.branch_power_vector_2
) = (
self.get_branch_power(
electric_grid_model,
self.node_voltage_vector
)
)
# Obtain loss solution.
self.loss = (
self.get_loss(
electric_grid_model,
self.node_voltage_vector
)
)
@staticmethod
def check_solution_conditions(
electric_grid_model: ElectricGridModelDefault,
node_power_vector_wye_initial_no_source: np.ndarray,
node_power_vector_delta_initial_no_source: np.ndarray,
node_power_vector_wye_candidate_no_source: np.ndarray,
node_power_vector_delta_candidate_no_source: np.ndarray,
node_voltage_vector_initial_no_source: np.ndarray
) -> bool:
"""Check conditions for fixed-point solution existence, uniqueness and non-singularity for
given power vector candidate and initial point.
- Conditions are formulated according to: <https://arxiv.org/pdf/1702.03310.pdf>
- Note the performance issues of this condition check algorithm due to the
requirement for matrix inversions / solving of linear equations.
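- In short: ``xi_initial`` / ``xi_candidate`` below are norms of the initial / candidate power
  deviations and ``gamma`` is a lower bound on the normalized initial voltage magnitude; the
  candidate is accepted only if ``xi_initial < gamma ** 2`` and
  ``xi_candidate < 0.25 * ((gamma ** 2 - xi_initial) / gamma) ** 2`` (summary of the checks below).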
"""
# Calculate norm of the initial nodal power vector.
xi_initial = (
np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* node_power_vector_wye_initial_no_source
)
)
),
axis=1
))
+ np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
electric_grid_model.node_transformation_matrix_no_source
* (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
@ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
) ** -1
)
* node_power_vector_delta_initial_no_source
)
)
),
axis=1
))
)
# Calculate norm of the candidate nodal power vector.
xi_candidate = (
np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* (
node_power_vector_wye_candidate_no_source
- node_power_vector_wye_initial_no_source
)
)
)
),
axis=1
))
+ np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
electric_grid_model.node_transformation_matrix_no_source
* (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
@ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
) ** -1
) * (
node_power_vector_delta_candidate_no_source
- node_power_vector_delta_initial_no_source
)
)
)
),
axis=1
))
)
# Calculate norm of the initial nodal voltage vector.
gamma = (
np.min([
np.min(
np.abs(node_voltage_vector_initial_no_source)
/ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
),
np.min(
np.abs(
electric_grid_model.node_transformation_matrix_no_source
* node_voltage_vector_initial_no_source
)
/ (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
* np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
)
)
])
)
# Obtain conditions for solution existence, uniqueness and non-singularity.
condition_initial = (
xi_initial
<
(gamma ** 2)
)
condition_candidate = (
xi_candidate
<
(0.25 * (((gamma ** 2) - xi_initial) / gamma) ** 2)
)
is_valid = (
condition_initial
& condition_candidate
)
# If `condition_initial` is violated, the given initial nodal voltage vector and power vectors are not valid.
# This suggests an error in the problem setup and hence triggers a warning.
if ~condition_initial:
logger.warning("Fixed point solution condition is not satisfied for the provided initial point.")
return is_valid
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
outer_iteration_limit=100,
outer_solution_algorithm='check_solution', # Choices: `check_conditions`, `check_solution`.
power_candidate_iteration_limit=100,
power_candidate_reduction_factor=0.5,
voltage_iteration_limit=100,
voltage_tolerance=1e-2
) -> np.ndarray:
"""Get nodal voltage vector by solving with the fixed point algorithm.
- Initial DER power vector / node voltage vector must be a valid
    solution to the fixed-point equation, e.g., a previous solution from a past
operation point.
- Fixed point equation according to: <https://arxiv.org/pdf/1702.03310.pdf>
"""
# TODO: Add proper documentation.
# TODO: Validate fixed-point solution conditions.
# Debug message.
logger.debug("Starting fixed point solution algorithm...")
# Obtain nodal power vectors.
node_power_vector_wye_no_source = (
electric_grid_model.der_incidence_wye_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
node_power_vector_delta_no_source = (
electric_grid_model.der_incidence_delta_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
# Obtain initial nodal power and voltage vectors, assuming no power conditions.
# TODO: Enable passing previous solution for fixed-point initialization.
node_power_vector_wye_initial_no_source = np.zeros(node_power_vector_wye_no_source.shape, dtype=complex)
node_power_vector_delta_initial_no_source = np.zeros(node_power_vector_delta_no_source.shape, dtype=complex)
node_voltage_vector_initial_no_source = electric_grid_model.node_voltage_vector_reference_no_source.copy()
# Define nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Instantiate outer iteration variables.
is_final = False
outer_iteration = 0
# Outer iteration between power vector candidate selection and fixed point voltage solution algorithm
# until a final solution is found.
while (
~is_final
& (outer_iteration < outer_iteration_limit)
):
# Outer solution algorithm based on fixed-point solution conditions check.
# - Checks solution conditions and adjust power vector candidate if necessary, before solving for voltage.
if outer_solution_algorithm == 'check_conditions':
# Reset nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Check solution conditions for nodal power vector candidate.
is_final = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source
)
)
# Instantiate power candidate iteration variable.
power_candidate_iteration = 0
is_valid = is_final.copy()
# If solution conditions are violated, iteratively reduce power to find a power vector candidate
# which satisfies the solution conditions.
while (
~is_valid
& (power_candidate_iteration < power_candidate_iteration_limit)
):
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_wye_candidate_no_source
- node_power_vector_wye_initial_no_source
)
)
node_power_vector_delta_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_delta_candidate_no_source
- node_power_vector_delta_initial_no_source
)
)
is_valid = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source,
)
)
power_candidate_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if power_candidate_iteration >= power_candidate_iteration_limit:
logger.warning(
"Power vector candidate selection algorithm for fixed-point solution reached "
f"maximum limit of {power_candidate_iteration_limit} iterations."
)
# Store current candidate power vectors as initial power vectors
# for next round of computation of solution conditions.
node_power_vector_wye_initial_no_source = (
node_power_vector_wye_candidate_no_source.copy()
)
node_power_vector_delta_initial_no_source = (
node_power_vector_delta_candidate_no_source.copy()
)
# Instantiate fixed point iteration variables.
voltage_iteration = 0
voltage_change = np.inf
while (
(voltage_iteration < voltage_iteration_limit)
& (voltage_change > voltage_tolerance)
):
# Calculate fixed point equation.
node_voltage_vector_estimate_no_source = (
np.transpose([electric_grid_model.node_voltage_vector_reference_no_source])
+ np.transpose([
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
(
np.conj(np.transpose([node_voltage_vector_initial_no_source])) ** -1
)
* np.conj(np.transpose([node_power_vector_wye_candidate_no_source]))
)
+ (
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
@ (
(
(
electric_grid_model.node_transformation_matrix_no_source
@ np.conj(np.transpose([node_voltage_vector_initial_no_source]))
) ** -1
)
* np.conj(np.transpose([node_power_vector_delta_candidate_no_source]))
)
)
)
)
])
).ravel()
# Calculate voltage change from previous iteration.
voltage_change = (
np.max(np.abs(
node_voltage_vector_estimate_no_source
- node_voltage_vector_initial_no_source
))
)
# Set voltage solution as initial voltage for next iteration.
node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()
# Increment voltage iteration counter.
voltage_iteration += 1
# Outer solution algorithm based on voltage solution check.
# - Checks if voltage solution exceeded iteration limit and adjusts power vector candidate if needed.
if outer_solution_algorithm == 'check_solution':
# If voltage solution exceeds iteration limit, reduce power and re-try voltage solution.
if voltage_iteration >= voltage_iteration_limit:
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor
# Reset initial nodal voltage vector.
node_voltage_vector_initial_no_source = (
electric_grid_model.node_voltage_vector_reference_no_source.copy()
)
# Otherwise, if power has previously been reduced, raise back power and re-try voltage solution.
else:
if (
(node_power_vector_wye_candidate_no_source != node_power_vector_wye_no_source).any()
or (node_power_vector_delta_candidate_no_source != node_power_vector_delta_no_source).any()
):
# Increase nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor ** -1
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor ** -1
else:
is_final = True
            # For the fixed-point algorithm, reaching the iteration limit is considered undesired and triggers a warning.
elif voltage_iteration >= voltage_iteration_limit:
logger.warning(
"Fixed point voltage solution algorithm reached "
f"maximum limit of {voltage_iteration_limit} iterations."
)
# Increment outer iteration counter.
outer_iteration += 1
# Reaching the outer iteration limit is considered undesired and triggers a warning.
if outer_iteration >= outer_iteration_limit:
logger.warning(
"Outer wrapper algorithm for fixed-point solution reached "
f"maximum limit of {outer_iteration_limit} iterations."
)
# Debug message.
logger.debug(
"Completed fixed point solution algorithm. "
f"Outer wrapper iterations: {outer_iteration}"
)
# Get full voltage vector.
node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
electric_grid_model.node_voltage_vector_reference_source
)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
node_voltage_vector_initial_no_source # Takes value of `node_voltage_vector_estimate_no_source`.
)
return node_voltage_vector
@staticmethod
def get_branch_power(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get branch power vectors by calculating power flow with given nodal voltage.
- Returns two branch power vectors, where `branch_power_vector_1` represents the
"from"-direction and `branch_power_vector_2` represents the "to"-direction.
"""
# Obtain branch admittance and incidence matrices.
branch_admittance_1_matrix = (
electric_grid_model.branch_admittance_1_matrix
)
branch_admittance_2_matrix = (
electric_grid_model.branch_admittance_2_matrix
)
branch_incidence_1_matrix = (
electric_grid_model.branch_incidence_1_matrix
)
branch_incidence_2_matrix = (
electric_grid_model.branch_incidence_2_matrix
)
# Calculate branch power vectors.
branch_power_vector_1 = (
(
branch_incidence_1_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_1_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
branch_power_vector_2 = (
(
branch_incidence_2_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_2_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
branch_power_vector_1 *= 3
branch_power_vector_2 *= 3
return (
branch_power_vector_1,
branch_power_vector_2
)
@staticmethod
def get_loss(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get total electric losses with given nodal voltage."""
# Calculate total losses.
# TODO: Check if summing up branch power is faster.
# loss = (
# np.sum(
# branch_power_vector_1
# + branch_power_vector_2
# )
# )
loss = (
np.array([node_voltage_vector])
@ np.conj(electric_grid_model.node_admittance_matrix)
@ np.transpose([np.conj(node_voltage_vector)])
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
loss *= 3
return loss
class PowerFlowSolutionZBus(PowerFlowSolutionFixedPoint):
"""Implicit Z-bus power flow solution object."""
# Overwrite `check_solution_conditions`, which is invalid for the Z-bus power flow.
@staticmethod
def check_solution_conditions(*args, **kwargs):
raise NotImplementedError("This method is invalid for the Z-bus power flow.")
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
voltage_iteration_limit=100,
voltage_tolerance=1e-2,
**kwargs
) -> np.ndarray:
"""Get nodal voltage vector by solving with the implicit Z-bus method."""
# Implicit Z-bus power flow solution (<NAME>).
# - “Can, Can, Lah!” (literal meaning, can accomplish)
# - <https://www.financialexpress.com/opinion/singapore-turns-50-the-remarkable-nation-that-can-lah/115775/>
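        # Sketch of the iteration implemented below: with Y_nn the no-source admittance matrix,
        # Y_ns the source-to-no-source coupling block and i(v) the nodal current injections,
        #     v_next = Y_nn^-1 @ (i(v) - Y_ns @ v_source)
        # which is repeated until the voltage change falls below `voltage_tolerance`.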
# Obtain nodal power vectors.
node_power_vector_wye_no_source = (
electric_grid_model.der_incidence_wye_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
node_power_vector_delta_no_source = (
electric_grid_model.der_incidence_delta_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
# Obtain utility variables.
node_admittance_matrix_no_source_inverse = (
scipy.sparse.linalg.inv(electric_grid_model.node_admittance_matrix_no_source.tocsc())
)
node_admittance_matrix_source_to_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')
)]
)
node_voltage_vector_initial_no_source = (
electric_grid_model.node_voltage_vector_reference_no_source.copy()
)
# Instantiate implicit Z-bus power flow iteration variables.
voltage_iteration = 0
voltage_change = np.inf
while (
(voltage_iteration < voltage_iteration_limit)
& (voltage_change > voltage_tolerance)
):
# Calculate current injections.
            node_current_injection_delta_in_wye_no_source = (
                electric_grid_model.node_transformation_matrix_no_source.transpose()
                @ np.conj(
                    np.linalg.inv(np.diag((
                        electric_grid_model.node_transformation_matrix_no_source
                        @ node_voltage_vector_initial_no_source
                    ).ravel()))
                    @ node_power_vector_delta_no_source
                )
            )
            node_current_injection_wye_no_source = (
                np.conj(node_power_vector_wye_no_source)
                / np.conj(node_voltage_vector_initial_no_source)
            )
node_current_injection_no_source = (
node_current_injection_delta_in_wye_no_source
+ node_current_injection_wye_no_source
)
# Calculate voltage.
node_voltage_vector_estimate_no_source = (
node_admittance_matrix_no_source_inverse @ (
- node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ node_current_injection_no_source
)
)
# node_voltage_vector_estimate_no_source = (
# electric_grid_model.node_voltage_vector_reference_no_source
# + node_admittance_matrix_no_source_inverse @ node_current_injection_no_source
# )
# Calculate voltage change from previous iteration.
voltage_change = (
np.max(np.abs(
node_voltage_vector_estimate_no_source
- node_voltage_vector_initial_no_source
))
)
# Set voltage estimate as new initial voltage for next iteration.
node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()
# Increment voltage iteration counter.
voltage_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if voltage_iteration >= voltage_iteration_limit:
logger.warning(
"Z-bus solution algorithm reached "
f"maximum limit of {voltage_iteration_limit} iterations."
)
# Get full voltage vector.
node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
electric_grid_model.node_voltage_vector_reference_source
)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
node_voltage_vector_initial_no_source # Takes value of `node_voltage_vector_estimate_no_source`.
)
return node_voltage_vector
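# Example usage (minimal sketch; assumes a MESMO scenario named 'singapore_6node' is available
# in the scenario database, which may differ per installation):
#     electric_grid_model = ElectricGridModelDefault('singapore_6node')
#     power_flow_solution = PowerFlowSolutionFixedPoint(electric_grid_model)
#     # power_flow_solution = PowerFlowSolutionZBus(electric_grid_model)  # Alternative solver.
#     print(np.abs(power_flow_solution.node_voltage_vector))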
class PowerFlowSolutionOpenDSS(PowerFlowSolution):
"""OpenDSS power flow solution object."""
@multimethod
def __init__(
self,
scenario_name: str,
**kwargs
):
# Obtain `electric_grid_model`.
electric_grid_model = ElectricGridModelOpenDSS(scenario_name)
self.__init__(
electric_grid_model,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelOpenDSS,
**kwargs
):
# Obtain `der_power_vector`, assuming nominal power conditions.
der_power_vector = electric_grid_model.der_power_vector_reference
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelOpenDSS,
der_power_vector: np.ndarray,
**kwargs
):
# Store DER power vector.
self.der_power_vector = der_power_vector.ravel()
# Check if correct OpenDSS circuit is initialized, otherwise reinitialize.
if opendssdirect.Circuit.Name() != electric_grid_model.circuit_name:
electric_grid_model.__init__(electric_grid_model.electric_grid_data)
# Set DER power vector in OpenDSS model.
for der_index, der_name in enumerate(electric_grid_model.der_names):
# TODO: For OpenDSS, all DERs are assumed to be loads.
opendss_command_string = (
f"load.{der_name}.kw = {- np.real(self.der_power_vector[der_index]) / 1000.0}"
+ f"\nload.{der_name}.kvar = {- np.imag(self.der_power_vector[der_index]) / 1000.0}"
)
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Obtain voltage solution.
self.node_voltage_vector = (
self.get_voltage(
electric_grid_model
)
)
# Obtain branch flow solution.
(
self.branch_power_vector_1,
self.branch_power_vector_2
) = (
self.get_branch_power()
)
# Obtain loss solution.
self.loss = (
self.get_loss()
)
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelOpenDSS
):
"""Get nodal voltage vector by solving OpenDSS model.
- OpenDSS model must be readily set up, with the desired power being set for all DERs.
"""
# Create index for OpenDSS nodes.
opendss_nodes = pd.Series(opendssdirect.Circuit.AllNodeNames()).str.split('.', expand=True)
opendss_nodes.columns = ['node_name', 'phase']
opendss_nodes.loc[:, 'phase'] = opendss_nodes.loc[:, 'phase'].astype(int)
opendss_nodes = pd.MultiIndex.from_frame(opendss_nodes)
# Extract nodal voltage vector and reindex to match MESMO nodes order.
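        # Note: `AllBusVolts()` returns real and imaginary parts interleaved, hence the
        # de-interleaving via the [0::2] / [1::2] strides below.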
node_voltage_vector_solution = (
pd.Series(
(
np.array(opendssdirect.Circuit.AllBusVolts()[0::2])
+ 1j * np.array(opendssdirect.Circuit.AllBusVolts()[1::2])
),
index=opendss_nodes
).reindex(
electric_grid_model.nodes.droplevel('node_type')
).values
)
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
node_voltage_vector_solution /= np.sqrt(3)
return node_voltage_vector_solution
@staticmethod
def get_branch_power():
"""Get branch power vectors by solving OpenDSS model.
- OpenDSS model must be readily set up, with the desired power being set for all DERs.
"""
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Instantiate branch vectors.
branch_power_vector_1 = (
np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
)
branch_power_vector_2 = (
np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
)
# Instantiate iteration variables.
branch_vector_index = 0
line_index = opendssdirect.Lines.First()
# Obtain line branch power vectors.
while line_index > 0:
branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
branch_phase_count = opendssdirect.CktElement.NumPhases()
branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0:(branch_phase_count * 2):2]
+ 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
)
branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0 + (branch_phase_count * 2)::2]
+ 1.0j * branch_power_opendss[1 + (branch_phase_count * 2)::2]
)
branch_vector_index += 1
line_index = opendssdirect.Lines.Next()
# Obtain transformer branch power vectors.
transformer_index = opendssdirect.Transformers.First()
while transformer_index > 0:
branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
branch_phase_count = opendssdirect.CktElement.NumPhases()
skip_phase = 2 if 0 in opendssdirect.CktElement.NodeOrder() else 0 # Ignore ground nodes.
branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0:(branch_phase_count * 2):2]
+ 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
)
branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0 + (branch_phase_count * 2) + skip_phase:-skip_phase:2]
+ 1.0j * branch_power_opendss[1 + (branch_phase_count * 2) + skip_phase:-skip_phase:2]
)
branch_vector_index += 1
transformer_index = opendssdirect.Transformers.Next()
# Reshape branch power vectors to appropriate size and remove entries for nonexistent phases.
# TODO: Sort vector by branch name if not in order.
branch_power_vector_1 = branch_power_vector_1.flatten()
branch_power_vector_2 = branch_power_vector_2.flatten()
branch_power_vector_1 = branch_power_vector_1[~np.isnan(branch_power_vector_1)]
branch_power_vector_2 = branch_power_vector_2[~np.isnan(branch_power_vector_2)]
return (
branch_power_vector_1,
branch_power_vector_2
)
@staticmethod
def get_loss():
"""Get total loss by solving OpenDSS model.
- OpenDSS model must be readily set up, with the desired power being set for all DERs.
"""
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Obtain loss.
loss = opendssdirect.Circuit.Losses()[0] + 1.0j * opendssdirect.Circuit.Losses()[1]
return loss
class PowerFlowSolutionSet(mesmo.utils.ObjectBase):
power_flow_solutions: typing.Dict[pd.Timestamp, PowerFlowSolution]
electric_grid_model: ElectricGridModelDefault
der_power_vector: pd.DataFrame
timesteps: pd.Index
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_operation_results: ElectricGridDEROperationResults,
**kwargs
):
der_power_vector = (
der_operation_results.der_active_power_vector
+ 1.0j * der_operation_results.der_reactive_power_vector
)
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_power_vector: pd.DataFrame,
power_flow_solution_method=PowerFlowSolutionFixedPoint
):
# Store attributes.
self.electric_grid_model = electric_grid_model
self.der_power_vector = der_power_vector
self.timesteps = self.electric_grid_model.timesteps
# Obtain power flow solutions.
power_flow_solutions = (
mesmo.utils.starmap(
power_flow_solution_method,
zip(
itertools.repeat(self.electric_grid_model),
der_power_vector.values
)
)
)
self.power_flow_solutions = dict(zip(self.timesteps, power_flow_solutions))
def get_results(self) -> ElectricGridOperationResults:
# Instantiate results variables.
der_power_vector = (
pd.DataFrame(columns=self.electric_grid_model.ders, index=self.timesteps, dtype=complex)
)
node_voltage_vector = (
pd.DataFrame(columns=self.electric_grid_model.nodes, index=self.timesteps, dtype=complex)
)
branch_power_vector_1 = (
pd.DataFrame(columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex)
)
branch_power_vector_2 = (
pd.DataFrame(columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex)
)
loss = pd.DataFrame(columns=['total'], index=self.timesteps, dtype=complex)
# Obtain results.
for timestep in self.timesteps:
power_flow_solution = self.power_flow_solutions[timestep]
der_power_vector.loc[timestep, :] = power_flow_solution.der_power_vector
node_voltage_vector.loc[timestep, :] = power_flow_solution.node_voltage_vector
branch_power_vector_1.loc[timestep, :] = power_flow_solution.branch_power_vector_1
branch_power_vector_2.loc[timestep, :] = power_flow_solution.branch_power_vector_2
loss.loc[timestep, :] = power_flow_solution.loss
der_active_power_vector = der_power_vector.apply(np.real)
der_reactive_power_vector = der_power_vector.apply(np.imag)
node_voltage_magnitude_vector = np.abs(node_voltage_vector)
branch_power_magnitude_vector_1 = np.abs(branch_power_vector_1)
branch_power_magnitude_vector_2 = np.abs(branch_power_vector_2)
loss_active = loss.apply(np.real)
loss_reactive = loss.apply(np.imag)
# Obtain per-unit values.
der_active_power_vector_per_unit = (
der_active_power_vector
* mesmo.utils.get_inverse_with_zeros(np.real(self.electric_grid_model.der_power_vector_reference))
)
der_reactive_power_vector_per_unit = (
der_reactive_power_vector
* mesmo.utils.get_inverse_with_zeros(np.imag(self.electric_grid_model.der_power_vector_reference))
)
node_voltage_magnitude_vector_per_unit = (
node_voltage_magnitude_vector
* mesmo.utils.get_inverse_with_zeros(np.abs(self.electric_grid_model.node_voltage_vector_reference))
)
branch_power_magnitude_vector_1_per_unit = (
branch_power_magnitude_vector_1
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
branch_power_magnitude_vector_2_per_unit = (
branch_power_magnitude_vector_2
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
# Store results.
return ElectricGridOperationResults(
electric_grid_model=self.electric_grid_model,
der_active_power_vector=der_active_power_vector,
der_active_power_vector_per_unit=der_active_power_vector_per_unit,
der_reactive_power_vector=der_reactive_power_vector,
der_reactive_power_vector_per_unit=der_reactive_power_vector_per_unit,
node_voltage_magnitude_vector=node_voltage_magnitude_vector,
node_voltage_magnitude_vector_per_unit=node_voltage_magnitude_vector_per_unit,
branch_power_magnitude_vector_1=branch_power_magnitude_vector_1,
branch_power_magnitude_vector_1_per_unit=branch_power_magnitude_vector_1_per_unit,
branch_power_magnitude_vector_2=branch_power_magnitude_vector_2,
branch_power_magnitude_vector_2_per_unit=branch_power_magnitude_vector_2_per_unit,
loss_active=loss_active,
loss_reactive=loss_reactive
)
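# Example usage (minimal sketch; `der_power_vector` is a hypothetical DataFrame indexed by the
# scenario timesteps with one complex power column per DER, and the scenario name is assumed
# as in the sketch above):
#     electric_grid_model = ElectricGridModelDefault('singapore_6node')
#     power_flow_solution_set = PowerFlowSolutionSet(electric_grid_model, der_power_vector)
#     results = power_flow_solution_set.get_results()
#     print(results.node_voltage_magnitude_vector_per_unit)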
class LinearElectricGridModel(mesmo.utils.ObjectBase):
"""Abstract linear electric model object, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
Note:
This abstract class only defines the expected variables of linear electric grid model objects,
but does not implement any functionality.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
electric_grid_model: ElectricGridModelDefault
power_flow_solution: PowerFlowSolution
sensitivity_voltage_by_power_wye_active: sp.spmatrix
sensitivity_voltage_by_power_wye_reactive: sp.spmatrix
sensitivity_voltage_by_power_delta_active: sp.spmatrix
sensitivity_voltage_by_power_delta_reactive: sp.spmatrix
sensitivity_voltage_by_der_power_active: sp.spmatrix
sensitivity_voltage_by_der_power_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_voltage_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_voltage_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_der_power_active: sp.spmatrix
sensitivity_voltage_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_der_power_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_der_power_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_der_power_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_der_power_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_der_power_reactive: sp.spmatrix
sensitivity_loss_active_by_power_wye_active: sp.spmatrix
sensitivity_loss_active_by_power_wye_reactive: sp.spmatrix
sensitivity_loss_active_by_power_delta_active: sp.spmatrix
sensitivity_loss_active_by_power_delta_reactive: sp.spmatrix
sensitivity_loss_active_by_der_power_active: sp.spmatrix
sensitivity_loss_active_by_der_power_reactive: sp.spmatrix
sensitivity_loss_reactive_by_power_wye_active: sp.spmatrix
sensitivity_loss_reactive_by_power_wye_reactive: sp.spmatrix
sensitivity_loss_reactive_by_power_delta_active: sp.spmatrix
sensitivity_loss_reactive_by_power_delta_reactive: sp.spmatrix
sensitivity_loss_reactive_by_der_power_active: sp.spmatrix
sensitivity_loss_reactive_by_der_power_reactive: sp.spmatrix
class LinearElectricGridModelGlobal(LinearElectricGridModel):
"""Linear electric grid model object based on global approximations, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
:syntax:
- ``LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
model object for given `electric_grid_model` and `power_flow_solution`.
- ``LinearElectricGridModelGlobal(scenario_name)``: Instantiate linear electric grid model for given
`scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
`power_flow_solution` is obtained for nominal power conditions.
Parameters:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Power flow solution object.
scenario_name (str): MESMO scenario name.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain electric grid model.
electric_grid_model = (
ElectricGridModelDefault(scenario_name)
)
# Obtain der power vector.
der_power_vector = (
electric_grid_model.der_power_vector_reference
)
# Obtain power flow solution.
power_flow_solution = (
PowerFlowSolutionFixedPoint(
electric_grid_model,
der_power_vector
)
)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution
):
# TODO: Validate linear model with delta DERs.
# Store power flow solution.
self.power_flow_solution = power_flow_solution
# Store electric grid model.
self.electric_grid_model = electric_grid_model
# Obtain shorthands for no-source matrices and vectors.
electric_grid_model.node_admittance_matrix_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
electric_grid_model.node_transformation_matrix_no_source = (
electric_grid_model.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
node_voltage_no_source = (
self.power_flow_solution.node_voltage_vector[
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
]
)
# Instantiate voltage sensitivity matrices.
self.sensitivity_voltage_by_power_wye_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_wye_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
# Calculate voltage sensitivity matrices.
# TODO: Document the change in sign in the reactive part compared to Hanif.
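        # The wye sensitivities below follow (informally) from linearizing the nodal equation
        # Y_nn @ v = conj(s) / conj(v) around the reference solution, giving
        # dv/dp ~= Y_nn^-1 @ diag(conj(v))^-1 for active power and
        # dv/dq ~= (1j * Y_nn)^-1 @ diag(conj(v))^-1 for reactive power; the delta sensitivities
        # additionally carry the node transformation matrix T.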
self.sensitivity_voltage_by_power_wye_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source.tocsc(),
sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
)
)
self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
)
)
self.sensitivity_voltage_by_power_delta_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source.tocsc(),
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
)
@ sp.diags(
(
(
electric_grid_model.node_transformation_matrix_no_source
@ np.conj(node_voltage_no_source)
) ** -1
).ravel()
)
)
self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
)
@ sp.diags(
(
(
electric_grid_model.node_transformation_matrix_no_source
                        @ np.conj(node_voltage_no_source)
) ** -1
).ravel()
)
)
self.sensitivity_voltage_by_der_power_active = (
self.sensitivity_voltage_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_by_der_power_reactive = (
self.sensitivity_voltage_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
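        # Voltage magnitude sensitivities below follow from d|v| = Re(conj(v) * dv) / |v|,
        # i.e. diag(|v|)^-1 @ Re(diag(conj(v)) @ dv/dp).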
self.sensitivity_voltage_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_voltage_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_voltage_magnitude_by_der_power_active = (
self.sensitivity_voltage_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_der_power_reactive = (
self.sensitivity_voltage_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate branch power sensitivity matrices.
sensitivity_branch_power_1_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_1_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_1_matrix
+ sp.diags((
electric_grid_model.branch_incidence_1_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_1_matrix
* np.sqrt(3)
)
sensitivity_branch_power_2_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_2_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_2_matrix
+ sp.diags((
electric_grid_model.branch_incidence_2_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_2_matrix
* np.sqrt(3)
)
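        # Branch power magnitude sensitivities below follow analogously from
        # d|S| = Re(conj(S) * dS) / |S|, applied to the 'from' and 'to' side power vectors.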
self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
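        # The 'squared' sensitivities below combine diag(Re(S)) @ Re(dS/dp) + diag(Im(S)) @ Im(dS/dp),
        # i.e. they are proportional to the derivative of |S| ** 2.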
self.sensitivity_branch_power_1_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_der_power_active = (
self.sensitivity_branch_power_1_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
self.sensitivity_branch_power_1_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_active = (
self.sensitivity_branch_power_2_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
self.sensitivity_branch_power_2_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate loss sensitivity matrices.
# sensitivity_loss_by_voltage = (
# np.array([self.power_flow_solution.node_voltage_vector])
# @ np.conj(electric_grid_model.node_admittance_matrix)
# + np.transpose(
# electric_grid_model.node_admittance_matrix
# @ np.transpose([self.power_flow_solution.node_voltage_vector])
# )
# )
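        # Note: A minimal descriptive sketch of the approximation used below: the loss sensitivity is taken as
        # the column sum of the branch power sensitivity matrices for both branch ends, i.e. the sensitivity of
        # the total branch power at both ends (which corresponds to the branch losses) with respect to the nodal
        # voltage vector; compare the commented-out direct admittance-based formulation above.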
sensitivity_loss_by_voltage = (
sum(np.transpose(
np.transpose(sensitivity_branch_power_1_by_voltage)
+ np.transpose(sensitivity_branch_power_2_by_voltage)
))
)
self.sensitivity_loss_active_by_power_wye_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_wye_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_reactive_by_power_wye_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_wye_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_active_by_der_power_active = (
self.sensitivity_loss_active_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_active_by_der_power_reactive = (
self.sensitivity_loss_active_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_active = (
self.sensitivity_loss_reactive_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_reactive = (
self.sensitivity_loss_reactive_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
class LinearElectricGridModelLocal(LinearElectricGridModel):
"""Linear electric grid model object based on local approximations, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
:syntax:
- ``LinearElectricGridModelLocal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
model object for given `electric_grid_model` and `power_flow_solution`.
- ``LinearElectricGridModelLocal(scenario_name)``: Instantiate linear electric grid model for given
`scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
`power_flow_solution` is obtained for nominal power conditions.
Parameters:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Power flow solution object.
scenario_name (str): MESMO scenario name.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain electric grid model.
electric_grid_model = (
ElectricGridModelDefault(scenario_name)
)
# Obtain der power vector.
der_power_vector = (
electric_grid_model.der_power_vector_reference
)
# Obtain power flow solution.
power_flow_solution = (
PowerFlowSolutionFixedPoint(
electric_grid_model,
der_power_vector
)
)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution
):
# Store power flow solution.
self.power_flow_solution = power_flow_solution
# Store electric grid model.
self.electric_grid_model = electric_grid_model
# Obtain shorthands for no-source matrices and vectors.
electric_grid_model.node_admittance_matrix_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
electric_grid_model.node_transformation_matrix_no_source = (
electric_grid_model.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
node_voltage_no_source = (
self.power_flow_solution.node_voltage_vector[
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
]
)
# Instantiate voltage sensitivity matrices.
self.sensitivity_voltage_by_power_wye_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_wye_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
# Calculate utility matrices.
A_matrix_inverse = (
sp.diags((
electric_grid_model.node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ electric_grid_model.node_admittance_matrix_no_source
@ node_voltage_no_source
) ** -1)
)
A_matrix_conjugate = (
sp.diags(np.conj(
electric_grid_model.node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ electric_grid_model.node_admittance_matrix_no_source
@ node_voltage_no_source
))
)
B_matrix = (
A_matrix_conjugate
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ sp.diags(np.conj(node_voltage_no_source))
@ electric_grid_model.node_admittance_matrix_no_source
)
# Calculate voltage sensitivity matrices.
# - TODO: Consider delta loads.
self.sensitivity_voltage_by_power_wye_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
B_matrix.tocsc(),
(
sp.identity(len(node_voltage_no_source))
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ sp.identity(len(node_voltage_no_source))
).tocsc()
)
)
self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
B_matrix.tocsc(),
(
(1.0j * sp.identity(len(node_voltage_no_source)))
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ (-1.0j * sp.identity(len(node_voltage_no_source)))
).tocsc()
)
)
# self.sensitivity_voltage_by_power_delta_active[np.ix_(
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
# )] = (
# ???
# )
# self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
# )] = (
# ???
# )
self.sensitivity_voltage_by_der_power_active = (
self.sensitivity_voltage_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_by_der_power_reactive = (
self.sensitivity_voltage_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_voltage_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_voltage_magnitude_by_der_power_active = (
self.sensitivity_voltage_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_der_power_reactive = (
self.sensitivity_voltage_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate branch power sensitivity matrices.
sensitivity_branch_power_1_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_1_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_1_matrix
+ sp.diags((
electric_grid_model.branch_incidence_1_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_1_matrix
* np.sqrt(3)
)
sensitivity_branch_power_2_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_2_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_2_matrix
+ sp.diags((
electric_grid_model.branch_incidence_2_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_2_matrix
* np.sqrt(3)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_der_power_active = (
self.sensitivity_branch_power_1_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
self.sensitivity_branch_power_1_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_active = (
self.sensitivity_branch_power_2_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
self.sensitivity_branch_power_2_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate loss sensitivity matrices.
# sensitivity_loss_by_voltage = (
# np.array([self.power_flow_solution.node_voltage_vector])
# @ np.conj(electric_grid_model.node_admittance_matrix)
# + np.transpose(
# electric_grid_model.node_admittance_matrix
# @ np.transpose([self.power_flow_solution.node_voltage_vector])
# )
# )
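        # Note: A minimal descriptive sketch of the approximation used below: the loss sensitivity is taken as
        # the column sum of the branch power sensitivity matrices for both branch ends, i.e. the sensitivity of
        # the total branch power at both ends (which corresponds to the branch losses) with respect to the nodal
        # voltage vector; compare the commented-out direct admittance-based formulation above.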
sensitivity_loss_by_voltage = (
sum(np.transpose(
np.transpose(sensitivity_branch_power_1_by_voltage)
+ np.transpose(sensitivity_branch_power_2_by_voltage)
))
)
self.sensitivity_loss_active_by_power_wye_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_wye_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_reactive_by_power_wye_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_wye_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_active_by_der_power_active = (
self.sensitivity_loss_active_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_active_by_der_power_reactive = (
self.sensitivity_loss_active_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_active = (
self.sensitivity_loss_reactive_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_reactive = (
self.sensitivity_loss_reactive_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
class LinearElectricGridModelSet(mesmo.utils.ObjectBase):
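    """Linear electric grid model set object, collecting one linear electric grid model per timestep of the given
    electric grid model, along with methods to define the corresponding optimization problem elements (variables,
    parameters, constraints and objective) and to obtain distribution locational marginal prices (DLMPs).
    """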
linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
electric_grid_model: ElectricGridModelDefault
timesteps: pd.Index
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid model & reference power flow solution.
electric_grid_model = ElectricGridModelDefault(scenario_name)
power_flow_solution = PowerFlowSolutionFixedPoint(electric_grid_model)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution,
linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelGlobal
):
self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
# Obtain linear electric grid models.
linear_electric_grid_model = linear_electric_grid_model_method(electric_grid_model, power_flow_solution)
linear_electric_grid_models = (
dict(zip(electric_grid_model.timesteps, itertools.repeat(linear_electric_grid_model)))
)
self.__init__(
electric_grid_model,
linear_electric_grid_models
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution_set: PowerFlowSolutionSet,
linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelLocal
):
self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
# Obtain linear electric grid models.
linear_electric_grid_models = (
mesmo.utils.starmap(
linear_electric_grid_model_method,
zip(
itertools.repeat(electric_grid_model),
power_flow_solution_set.power_flow_solutions.values()
)
)
)
linear_electric_grid_models = (
dict(zip(electric_grid_model.timesteps, linear_electric_grid_models))
)
self.__init__(
electric_grid_model,
linear_electric_grid_models
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
):
# Store attributes.
self.electric_grid_model = electric_grid_model
self.timesteps = self.electric_grid_model.timesteps
self.linear_electric_grid_models = linear_electric_grid_models
@staticmethod
def check_linear_electric_grid_model_method(linear_electric_grid_model_method):
if not issubclass(linear_electric_grid_model_method, LinearElectricGridModel):
raise ValueError(f"Invalid linear electric grid model method: {linear_electric_grid_model_method}")
def define_optimization_problem(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None,
**kwargs
):
        # Define optimization problem elements through the respective sub-methods.
self.define_optimization_variables(optimization_problem, scenarios=scenarios)
self.define_optimization_parameters(
optimization_problem,
price_data,
scenarios=scenarios,
**kwargs
)
self.define_optimization_constraints(optimization_problem, scenarios=scenarios)
self.define_optimization_objective(optimization_problem, scenarios=scenarios)
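    # Usage sketch (assumptions: 'scenario_name' refers to an existing MESMO scenario, `mesmo.data_interface.PriceData`
    # can be instantiated from the scenario name, and the optimization problem is solved via the solver interface of
    # `mesmo.utils.OptimizationProblem`, which is not shown here):
    #
    #     linear_electric_grid_model_set = LinearElectricGridModelSet(scenario_name)
    #     price_data = mesmo.data_interface.PriceData(scenario_name)
    #     optimization_problem = mesmo.utils.OptimizationProblem()
    #     linear_electric_grid_model_set.define_optimization_problem(optimization_problem, price_data)
    #     # ... solve `optimization_problem`, then obtain DLMPs:
    #     dlmps = linear_electric_grid_model_set.get_optimization_dlmps(optimization_problem, price_data)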
def define_optimization_variables(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Define DER power vector variables.
optimization_problem.define_variable(
'der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)
optimization_problem.define_variable(
'der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)
# Define node voltage magnitude variable.
optimization_problem.define_variable(
'node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)
# Define branch power magnitude variables.
optimization_problem.define_variable(
'branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)
optimization_problem.define_variable(
'branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)
# Define loss variables.
optimization_problem.define_variable(
'loss_active', scenario=scenarios, timestep=self.timesteps
)
optimization_problem.define_variable(
'loss_reactive', scenario=scenarios, timestep=self.timesteps
)
def define_optimization_parameters(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
node_voltage_magnitude_vector_minimum: np.ndarray = None,
node_voltage_magnitude_vector_maximum: np.ndarray = None,
branch_power_magnitude_vector_maximum: np.ndarray = None,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Obtain timestep interval in hours, for conversion of power to energy.
timestep_interval_hours = (self.timesteps[1] - self.timesteps[0]) / pd.Timedelta('1h')
# Define voltage variable terms.
optimization_problem.define_parameter(
'voltage_active_term',
sp.block_diag([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'voltage_reactive_term',
sp.block_diag([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define voltage constant term.
optimization_problem.define_parameter(
'voltage_constant',
np.concatenate([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
- linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 1) variable terms.
optimization_problem.define_parameter(
'branch_power_1_active_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'branch_power_1_reactive_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
        # Define branch flow (direction 1) constant term.
optimization_problem.define_parameter(
'branch_power_1_constant',
np.concatenate([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)])
- linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 2) variable terms.
optimization_problem.define_parameter(
'branch_power_2_active_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'branch_power_2_reactive_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 2) constant term.
optimization_problem.define_parameter(
'branch_power_2_constant',
np.concatenate([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)])
- linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define active loss variable terms.
optimization_problem.define_parameter(
'loss_active_active_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'loss_active_reactive_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define active loss constant term.
optimization_problem.define_parameter(
'loss_active_constant',
np.concatenate([
np.real(linear_electric_grid_model.power_flow_solution.loss)
- linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define reactive loss variable terms.
optimization_problem.define_parameter(
'loss_reactive_active_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'loss_reactive_reactive_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
        # Define reactive loss constant term.
optimization_problem.define_parameter(
'loss_reactive_constant',
np.concatenate([
np.imag(linear_electric_grid_model.power_flow_solution.loss)
- linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define voltage limits.
optimization_problem.define_parameter(
'voltage_limit_minimum',
np.concatenate([
node_voltage_magnitude_vector_minimum.ravel()
/ np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if node_voltage_magnitude_vector_minimum is not None
else -np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
)
optimization_problem.define_parameter(
'voltage_limit_maximum',
np.concatenate([
node_voltage_magnitude_vector_maximum.ravel()
/ np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if node_voltage_magnitude_vector_maximum is not None
else +np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
)
# Define branch flow limits.
optimization_problem.define_parameter(
'branch_power_minimum',
np.concatenate([
- branch_power_magnitude_vector_maximum.ravel()
/ linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if branch_power_magnitude_vector_maximum is not None
else -np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
)
optimization_problem.define_parameter(
'branch_power_maximum',
np.concatenate([
branch_power_magnitude_vector_maximum.ravel()
/ linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if branch_power_magnitude_vector_maximum is not None
else +np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
)
# Define objective parameters.
optimization_problem.define_parameter(
'electric_grid_active_power_cost',
np.array([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
* -1.0 * timestep_interval_hours # In Wh.
@ sp.block_diag(
[np.array([np.real(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
)
)
optimization_problem.define_parameter(
'electric_grid_active_power_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
* np.concatenate([np.real(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
)
optimization_problem.define_parameter(
'electric_grid_reactive_power_cost',
np.array([price_data.price_timeseries.loc[:, ('reactive_power', 'source', 'source')].values])
* -1.0 * timestep_interval_hours # In Wh.
@ sp.block_diag(
[np.array([np.imag(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
)
)
optimization_problem.define_parameter(
'electric_grid_reactive_power_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
* np.concatenate([np.imag(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
)
optimization_problem.define_parameter(
'electric_grid_loss_active_cost',
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values
* timestep_interval_hours # In Wh.
)
optimization_problem.define_parameter(
'electric_grid_loss_active_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
)
def define_optimization_constraints(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Define voltage equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'==',
('variable', 'voltage_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'voltage_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'voltage_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define branch flow (direction 1) equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'==',
('variable', 'branch_power_1_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'branch_power_1_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'branch_power_1_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define branch flow (direction 2) equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'==',
('variable', 'branch_power_2_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'branch_power_2_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'branch_power_2_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define active loss equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(name='loss_active', scenario=scenarios, timestep=self.timesteps)),
'==',
('variable', 'loss_active_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'loss_active_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'loss_active_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define reactive loss equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(name='loss_reactive', scenario=scenarios, timestep=self.timesteps)),
'==',
('variable', 'loss_reactive_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'loss_reactive_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'loss_reactive_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define voltage limits.
# Add dedicated keys to enable retrieving dual variables.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'>=',
('constant', 'voltage_limit_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='voltage_magnitude_vector_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'<=',
('constant', 'voltage_limit_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='voltage_magnitude_vector_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
),
broadcast='scenario'
)
# Define branch flow limits.
# Add dedicated keys to enable retrieving dual variables.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'>=',
('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_1_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'<=',
('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_1_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'>=',
('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_2_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'<=',
('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_2_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
def define_optimization_objective(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Set objective flag.
optimization_problem.flags['has_electric_grid_objective'] = True
# Define objective for electric loads.
# - Defined as cost of electric supply at electric grid source node.
# - Only defined here, if not yet defined as cost of electric power supply at the DER node
# in `mesmo.der_models.DERModel.define_optimization_objective`.
if not optimization_problem.flags.get('has_der_objective'):
# Active power cost / revenue.
# - Cost for load / demand, revenue for generation / supply.
optimization_problem.define_objective(
('variable', 'electric_grid_active_power_cost', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'electric_grid_active_power_cost_sensitivity', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
), dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
broadcast='scenario'
)
# Reactive power cost / revenue.
# - Cost for load / demand, revenue for generation / supply.
optimization_problem.define_objective(
('variable', 'electric_grid_reactive_power_cost', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'electric_grid_reactive_power_cost_sensitivity', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
), dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
broadcast='scenario'
)
# Define active loss cost.
optimization_problem.define_objective(
('variable', 'electric_grid_loss_active_cost', dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
)),
('variable', 'electric_grid_loss_active_cost_sensitivity', dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
), dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
)),
broadcast='scenario'
)
def evaluate_optimization_objective(
self,
results: ElectricGridOperationResults,
price_data: mesmo.data_interface.PriceData
) -> float:
# Instantiate optimization problem.
optimization_problem = mesmo.utils.OptimizationProblem()
self.define_optimization_parameters(optimization_problem, price_data)
self.define_optimization_variables(optimization_problem)
self.define_optimization_objective(optimization_problem)
# Instantiate variable vector.
x_vector = np.zeros((len(optimization_problem.variables), 1))
# Set variable vector values.
objective_variable_names = [
'der_active_power_vector_per_unit',
'der_reactive_power_vector_per_unit',
'loss_active'
]
for variable_name in objective_variable_names:
index = mesmo.utils.get_index(optimization_problem.variables, name=variable_name.replace('_per_unit', ''))
x_vector[index, 0] = results[variable_name].values.ravel()
# Obtain objective value.
objective = optimization_problem.evaluate_objective(x_vector)
return objective
def get_optimization_dlmps(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None
) -> ElectricGridDLMPResults:
# Obtain results index sets, depending on if / if not scenarios given.
if scenarios in [None, [None]]:
scenarios = [None]
ders = self.electric_grid_model.ders
nodes = self.electric_grid_model.nodes
branches = self.electric_grid_model.branches
else:
ders = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.ders.to_flat_index()),
names=['scenario', 'der']
)
)
nodes = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.nodes.to_flat_index()),
names=['scenario', 'node']
)
)
branches = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.branches.to_flat_index()),
names=['scenario', 'branch']
)
)
# Obtain individual duals.
voltage_magnitude_vector_minimum_dual = (
optimization_problem.duals['voltage_magnitude_vector_minimum_constraint'].loc[
self.electric_grid_model.timesteps, nodes
]
/ np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
)
voltage_magnitude_vector_maximum_dual = (
-1.0 * optimization_problem.duals['voltage_magnitude_vector_maximum_constraint'].loc[
self.electric_grid_model.timesteps, nodes
]
/ np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
)
branch_power_magnitude_vector_1_minimum_dual = (
optimization_problem.duals['branch_power_magnitude_vector_1_minimum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_1_maximum_dual = (
-1.0 * optimization_problem.duals['branch_power_magnitude_vector_1_maximum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_2_minimum_dual = (
optimization_problem.duals['branch_power_magnitude_vector_2_minimum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_2_maximum_dual = (
-1.0 * optimization_problem.duals['branch_power_magnitude_vector_2_maximum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
# Instantiate DLMP variables.
# TODO: Consider delta connections in nodal DLMPs.
# TODO: Consider single-phase DLMPs.
electric_grid_energy_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_voltage_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_congestion_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_loss_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_energy_dlmp_node_reactive_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_voltage_dlmp_node_reactive_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_congestion_dlmp_node_reactive_power = (
| pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float) | pandas.DataFrame |
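# Hedged, self-contained sketch of the dual-normalisation step used for the DLMP
# components above: constraint duals are divided by the corresponding reference
# magnitudes (node voltage or branch power) before being split into energy /
# voltage / congestion / loss parts. All names and numbers below are illustrative
# stand-ins, not MESMO model objects.
import numpy as np
import pandas as pd
timesteps = pd.date_range('2021-01-01', periods=3, freq='D')
nodes = pd.Index(['node_1', 'node_2'], name='node')
node_voltage_reference = np.array([1.0, 0.98])  # hypothetical per-unit references
raw_duals = pd.DataFrame(
    [[0.4, 0.1], [0.0, 0.2], [0.3, 0.0]], index=timesteps, columns=nodes
)
# Same pattern as voltage_magnitude_vector_minimum_dual above: divide each column
# by the absolute reference magnitude of its node.
voltage_dlmp_component = raw_duals / np.abs(node_voltage_reference)
print(voltage_dlmp_component)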
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.toolkits import power_curve
from operational_analysis.toolkits.power_curve.parametric_forms import (
logistic5param,
logistic5param_capped,
)
noise = 0.1
class TestPowerCurveFunctions(unittest.TestCase):
def setUp(self):
np.random.seed(42)
params = [1300, -7, 11, 2, 0.5]
self.x = pd.Series(np.random.random(100) * 30)
self.y = pd.Series(logistic5param(self.x, *params) + np.random.random(100) * noise)
# power curve source: https://github.com/NREL/turbine-models/blob/master/Offshore/2020ATB_NREL_Reference_15MW_240.csv
self.nrel_15mw_wind = pd.Series(np.arange(4, 26))
self.nrel_15mw_power = pd.Series(
np.array(
[
720,
1239,
2271,
3817,
5876,
8450,
11536,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
15000,
1500,
]
)
)
def test_IEC(self):
# Create test data using logistic5param form
curve = power_curve.IEC(self.x, self.y)
y_pred = curve(self.x)
# Does the IEC power curve match the test data?
nptest.assert_allclose(
self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit."
)
def test_IEC_with_bounds(self):
# Create the power curve with bounds at 4 m/s and 25 m/s and the 1 m/s bin width of the reference power curve
cut_in = 4
cut_out = 25
curve = power_curve.IEC(
self.nrel_15mw_wind,
self.nrel_15mw_power,
windspeed_start=cut_in,
windspeed_end=cut_out,
bin_width=1,
)
# Create the test data
test_windspeeds = np.arange(0, 31)
test_power = curve(test_windspeeds)
# Test that all wind speeds outside the cut-in / cut-out range produce no power
should_be_zeros = test_power[(test_windspeeds < cut_in) | (test_windspeeds > cut_out)]
nptest.assert_array_equal(should_be_zeros, np.zeros(should_be_zeros.shape))
# Test that the power at all valid wind speeds matches the reference power curve
valid_power = test_power[(test_windspeeds >= cut_in) & (test_windspeeds <= cut_out)]
nptest.assert_array_equal(self.nrel_15mw_power, valid_power)
def test_logistic_5_param(self):
# Create test data using logistic5param form
curve = power_curve.logistic_5_parametric(self.x, self.y)
y_pred = curve(self.x)
# Does the logistic-5 power curve match the test data?
nptest.assert_allclose(
self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit."
)
def test_gam(self):
# Create test data using logistic5param form
curve = power_curve.gam(windspeed_column=self.x, power_column=self.y, n_splines=20)
y_pred = curve(self.x)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(
self.y, y_pred, rtol=0.05, atol=20, err_msg="Power curve did not properly fit."
)
def test_3paramgam(self):
# Create test data using logistic5param form
winddir = np.random.random(100)
airdens = np.random.random(100)
curve = power_curve.gam_3param(
windspeed_column=self.x,
winddir_column=winddir,
airdens_column=airdens,
power_column=self.y,
n_splines=20,
)
y_pred = curve(self.x, winddir, airdens)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(
self.y, y_pred, rtol=0.05, atol=20, err_msg="Power curve did not properly fit."
)
def tearDown(self):
pass
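# Hedged sketch (not part of operational_analysis): the "method of bins" idea
# behind an IEC-style power curve fit exercised above — group wind speeds into
# fixed-width bins and take the mean power per bin. Names and numbers below are
# illustrative only.
import numpy as np
import pandas as pd
def method_of_bins(windspeed, power, bin_width=0.5):
    # Bin edges from zero up to the maximum observed wind speed.
    bins = np.arange(0.0, windspeed.max() + bin_width, bin_width)
    centers = bins[:-1] + bin_width / 2.0
    bin_labels = pd.cut(windspeed, bins, labels=centers)
    return power.groupby(bin_labels).mean()
rng = np.random.default_rng(42)
ws = pd.Series(rng.uniform(0.0, 30.0, 1000))
pw = pd.Series(np.clip((ws - 3.0).clip(lower=0.0) ** 3, 0.0, 15000.0))
print(method_of_bins(ws, pw).head())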
class TestParametricForms(unittest.TestCase):
def setUp(self):
pass
def test_logistic5parameter(self):
y_pred = logistic5param(np.array([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
y_pred = logistic5param(np.array([1, 2, 3]), *[1300.0, -7.0, 11.0, 2.0, 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(
y, y_pred, err_msg="Power curve did not handle integer inputs properly."
)
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, 7, 11, 2, 0.5)
y = np.array([1300.0, 1300.0])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b>0).")
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, -7, 11, 2, 0.5)
y = np.array([2.0, 2.0])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b<0).")
def test_logistic5parameter_capped(self):
# Numpy array + Lower Bound
y_pred = logistic5param_capped(
np.array([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5], lower=5.0, upper=20.0
)
y = np.array([5.0, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Numpy array + Upper and Lower Bound
y_pred = logistic5param_capped(
np.array([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5], lower=5.0, upper=10.0
)
y = np.array([5.0, 5.32662505, 10.0])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Pandas Series + Upper and Lower Bound
y_pred = logistic5param_capped(
pd.Series([1.0, 2.0, 3.0]), *[1300.0, -7.0, 11.0, 2.0, 0.5], lower=5.0, upper=20.0
)
y = | pd.Series([5.0, 5.32662505, 15.74992462]) | pandas.Series |
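# Hedged sketch: a five-parameter logistic of the form d + (a - d) / (1 + (x/c)**b)**g,
# which reproduces the values asserted in test_logistic5parameter above. This is an
# illustrative re-implementation, not the operational_analysis code itself.
import numpy as np
def logistic5(x, a, b, c, d, g):
    # d is the lower asymptote, a the upper asymptote; the sign of b sets direction.
    x = np.asarray(x, dtype=float)
    return d + (a - d) / (1.0 + (x / c) ** b) ** g
# Prints approximately [2.29403585, 5.32662505, 15.74992462], matching the test above.
print(logistic5([1.0, 2.0, 3.0], 1300.0, -7.0, 11.0, 2.0, 0.5))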
from typing import List, Optional, Tuple, Type, Union, Dict, Any
from datetime import datetime, timedelta, tzinfo
from pprint import pformat
import os
import warnings
import logging
import inspect
import pytz
import numpy as np
import pandas as pd
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Query
from sqlalchemy.dialects import postgresql
from timetomodel.utils.debug_utils import render_query
from timetomodel.utils.time_utils import (
tz_aware_utc_now,
timedelta_to_pandas_freq_str,
timedelta_fits_into,
)
from timetomodel.exceptions import IncompatibleModelSpecs, NaNData, MissingData
from timetomodel.transforming import Transformation, ReversibleTransformation
"""
Specs for the context of your model and how to treat your model data.
"""
DEFAULT_RATIO_TRAINING_TESTING_DATA = 2 / 3
DEFAULT_REMODELING_FREQUENCY = timedelta(days=1)
np.seterr(all="warn")
warnings.filterwarnings("error", message="invalid value encountered in power")
logger = logging.getLogger(__name__)
class SeriesSpecs(object):
"""Describes a time series (e.g. a pandas Series).
In essence, a column in the regression frame, filled with numbers.
Using this base class, the column will be filled with NaN values.
If you have data to be loaded in automatically, you should be using one of the subclasses, which allow you to describe
or pass in an actual data source to be loaded.
When dealing with columns, our code should usually refer to this superclass so it does not need to care
which kind of data source it is dealing with.
"""
# The name in the resulting feature frame, and possibly in the saved model specs (named by outcome var)
name: str
# The name of the data column in the data source. If None, the name will be tried.
column: Optional[str]
# timezone of the data - e.g. useful when de-serializing data (pandas serialises to UTC)
original_tz: tzinfo
# Custom transformation on feature data to be made before forecasting, back-transformed right after.
feature_transformation: Optional[ReversibleTransformation]
# Custom processing on data right after loading, e.g. for cleanup
post_load_processing: Optional[Transformation]
# Custom resampling parameters. All parameters apply to pd.resample, only "aggregation" is the name
# of the aggregation function to be called on the resulting resampler.
resampling_config: Dict[str, Any]
interpolation_config: Dict[str, Any]
def __init__(
self,
name: str,
original_tz: Optional[
tzinfo
] = None, # TODO: why should this be possible to be set?
feature_transformation: Optional[ReversibleTransformation] = None,
post_load_processing: Optional[Transformation] = None,
resampling_config: Dict[str, Any] = None,
interpolation_config: Dict[str, Any] = None,
):
self.name = name
self.original_tz = original_tz
self.feature_transformation = feature_transformation
self.post_load_processing = post_load_processing
self.resampling_config = resampling_config
self.interpolation_config = interpolation_config
self.__series_type__ = self.__class__.__name__
def as_dict(self):
return vars(self)
def _load_series(self) -> pd.Series:
"""Subclasses overwrite this function to get the raw data.
This method is responsible for calling any post_load_processing at the right place."""
data = | pd.Series() | pandas.Series |
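# Hedged sketch of a concrete SeriesSpecs-style subclass: _load_series wraps an
# in-memory pandas Series, applies an optional post-load callable and resamples it
# using an "aggregation" entry, mirroring the attribute comments above. The class
# below is illustrative only and is not part of timetomodel.
import pandas as pd
class InMemorySeriesSpecs:
    def __init__(self, name, data, post_load_processing=None, resampling_config=None):
        self.name = name
        self.data = data
        self.post_load_processing = post_load_processing
        self.resampling_config = resampling_config or {}
    def _load_series(self) -> pd.Series:
        data = self.data.copy()
        if self.post_load_processing is not None:
            data = self.post_load_processing(data)
        if self.resampling_config:
            config = dict(self.resampling_config)
            aggregation = config.pop("aggregation", "mean")
            data = getattr(data.resample(**config), aggregation)()
        return data
series = pd.Series(
    range(6), index=pd.date_range("2021-01-01", periods=6, freq="15min")
)
specs = InMemorySeriesSpecs("demo", series, resampling_config={"rule": "30min"})
print(specs._load_series())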
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
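# converters may be keyed by column name or by position and can return non-string values (dates, integers)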
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first 4 lines (the three comments and 'X,Y,Z');
# header=1 then picks 'A,B,C', the second remaining line, as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
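# skipinitialspace=True should strip the blanks after each delimiter so '-9999.0' is still matched as an NA value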
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
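# usecols by position or by name should subset the columns, combine correctly with names=/header=, and raise when its length conflicts with names=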
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
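# header-only input: each index_col form (None, False, int, str, list of int, list of str) should yield the matching empty frame and index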
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import pandas as pd
import numpy as np
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import os
import seaborn as sns
import matplotlib.dates as mdates
import sys
sys.path.append('../')
from processing_helpers import *
from load_paths import load_box_paths
mpl.rcParams['pdf.fonttype'] = 42
def get_pop_by_age(adjust_for_chicago=True):
"""Population by age per covid region"""
pop_df = pd.read_csv(os.path.join(datapath, "covid_IDPH/population/cc-est2019-agesex-17.csv"))
pop_df = pop_df[pop_df['YEAR']==12] # corresponds to 2019
pop_df['AGE16BELOW_TOT'] = pop_df['POPESTIMATE'] - pop_df['AGE16PLUS_TOT']
pop_df['AGE65BELOW_TOT'] = pop_df['POPESTIMATE'] - pop_df['AGE65PLUS_TOT']
pop_df['16-64'] = pop_df['AGE65BELOW_TOT'] - pop_df['AGE16BELOW_TOT']
pop_df['65+'] = pop_df['AGE65PLUS_TOT']
pop_df['county'] = pop_df['CTYNAME'].str.replace(' County','')
pop_df = pop_df[['county','POPESTIMATE','65+','16-64','MEDIAN_AGE_TOT','AGE16BELOW_TOT']]
if adjust_for_chicago:
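# Carve the City of Chicago out of Cook County using the age-share estimates below (source URL in the comment), so covid region 11 (Chicago) gets its own population row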
chicago_pop = 2456274
chicago_perc_pop_below16 = 0.1758 # share below age 16 (actually below 15, given the available age bins)
chicago_perc_pop_16to65 = 0.70174 # estimated from https://www.chicagohealthatlas.org/indicators/total-population
chicago_perc_pop_above65 = 0.1224
chicago_pop_below16 = int(round(chicago_pop * chicago_perc_pop_below16,0))
chicago_pop_16to65 = int(round(chicago_pop * chicago_perc_pop_16to65,0))
chicago_pop_above65 = int(round(chicago_pop * chicago_perc_pop_above65,0))
pop_df[pop_df['county'] == 'Cook']
chicago_df = {'county': ['Chicago'], 'POPESTIMATE': [chicago_pop],
'65+': [chicago_pop_above65], '16-64': [chicago_pop_16to65],
'MEDIAN_AGE_TOT' : [-9],'AGE16BELOW_TOT':[chicago_pop_below16] }
chicago_df = pd.DataFrame(data=chicago_df)
cook_df = pop_df[pop_df['county'] == 'Cook'].copy() # copy so the adjustments below do not trigger SettingWithCopyWarning
cook_df['POPESTIMATE'] = cook_df['POPESTIMATE'] - chicago_pop
cook_df['65+'] = cook_df['65+'] - chicago_pop_above65
cook_df['16-64'] = cook_df['16-64'] - chicago_pop_16to65
cook_df['AGE16BELOW_TOT'] = cook_df['AGE16BELOW_TOT'] - chicago_pop_below16
cook_chicago_df = cook_df.append(chicago_df)
pop_df = pop_df[pop_df['county'] != 'Cook']
pop_df = pop_df.append(cook_chicago_df).reset_index()
### Chicago (covid region 11) is otherwise missing from the county-level file
pop_df = merge_county_covidregions(pop_df, key_x='county', add_pop=False)
#pop_df.groupby(['covid_region'])[['MEDIAN_AGE_TOT']].agg([np.min, np.mean, np.max] ).reset_index()
pop_df = pop_df.groupby(['covid_region'])[['POPESTIMATE','AGE16BELOW_TOT', '65+', '16-64']].agg(np.nansum).reset_index()
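# Reshape to long format: one row per (covid_region, agegrp) with the absolute population and its within-region share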
pop_df_i = pd.melt(pop_df, id_vars=['covid_region'], value_vars=['65+', '16-64'])
pop_df_i.rename(columns={"variable": "agegrp"}, inplace=True)
pop_df_i.rename(columns={"value": "population"}, inplace=True)
pop_df['16-64'] = pop_df['16-64'] / pop_df['POPESTIMATE']
pop_df['65+'] = pop_df['65+'] / pop_df['POPESTIMATE']
pop_df_ii = pd.melt(pop_df, id_vars=['covid_region'], value_vars=['65+', '16-64'])
pop_df_ii.rename(columns={"variable": "agegrp"}, inplace=True)
pop_df_ii.rename(columns={"value": "pop_perc"}, inplace=True)
df = pd.merge(pop_df_i, pop_df_ii)
return df
def plot_by_age_region_time(df, channel,plot_title='',plot_name=None):
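"""Plot the time series of channel for each age group, one subplot per COVID-19 region."""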
fig = plt.figure(figsize=(14, 8))
fig.subplots_adjust(right=0.97, left=0.05, hspace=0.5, wspace=0.3, top=0.91, bottom=0.08)
palette = ('#913058', "#F6851F", "#00A08A", "#D61B5A", "#5393C3", "#F1A31F", "#98B548", "#8971B3", "#969696")
# sns.color_palette('husl', len(channels))
fig.suptitle(x=0.5, y=0.98, t=plot_title, size=14)
for e, ems_num in enumerate(df['covid_region'].unique()):
plotsubtitle = f'COVID-19 Region {str(int(ems_num))}'
if ems_num == 0:
plotsubtitle = 'Illinois'
mdf = df[df['covid_region'] == ems_num]
ax = fig.add_subplot(3, 4, e + 1)
ax.grid(b=True, which='major', axis='y',color='#999999', linestyle='-', alpha=0.3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%d'))
ax.set_title(plotsubtitle)
for c, age in enumerate(df['agegrp'].unique()):
pdf = mdf[mdf['agegrp']==age]
ax.plot(pdf['date'], pdf[channel], color=palette[c], label=age)
ax.set_ylim(0, 1)
ax.legend()
if plot_name is None:
plot_name =f'{channel}_by_age_region'
plt.savefig(os.path.join(plot_path, f'{plot_name}_time.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{plot_name}_time.pdf'), format='PDF')
def plot_by_age_region(df, channel,plot_title='',plot_name=None):
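"""Bar chart of channel by age group, one subplot per COVID-19 region."""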
fig = plt.figure(figsize=(14, 8))
fig.subplots_adjust(right=0.97, left=0.05, hspace=0.5, wspace=0.3, top=0.91, bottom=0.08)
palette = ('#913058', "#F6851F", "#00A08A", "#D61B5A", "#5393C3", "#F1A31F", "#98B548", "#8971B3", "#969696")
# sns.color_palette('husl', len(channels))
fig.suptitle(x=0.5, y=0.98, t=plot_title, size=14)
for e, ems_num in enumerate(df['covid_region'].unique()):
plotsubtitle = f'COVID-19 Region {str(int(ems_num))}'
if ems_num == 0:
plotsubtitle = 'Illinois'
ax = fig.add_subplot(3, 4, e + 1)
ax.grid(b=True, which='major', axis='y',color='#999999', linestyle='-', alpha=0.3)
mdf = df[df['covid_region'] == ems_num]
ax.bar(mdf['agegrp'], mdf[channel], color=palette[0])
ax.set_title(plotsubtitle)
ax.set_ylim(0, 1)
ax.legend()
if plot_name is None:
plot_name =f'{channel}_by_age_region'
plt.savefig(os.path.join(plot_path, f'{plot_name}.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{plot_name}.pdf'), format='PDF')
if __name__ == '__main__':
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
plot_path = os.path.join(projectpath, 'Plots + Graphs', 'vaccinations')
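# Load the IDPH vaccination report (the 'report_date' column is parsed to dates below)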
df = pd.read_csv(os.path.join(datapath, 'covid_IDPH','Corona virus reports','vaccinations.csv'))
df.columns
df['date'] = | pd.to_datetime(df['report_date']) | pandas.to_datetime |