prompt | completion | api
---|---|---
string, length 19 to 1.03M | string, length 4 to 2.12k | string, length 8 to 90
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
             pd.period_range('2014-01-06', '2014-01-07'),
from math import radians, cos, sin, asin, sqrt
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import numpy as np
from datetime import date, timedelta
from pandas.tseries.offsets import DateOffset
from math import radians, cos, sin, asin, sqrt
import folium
import plotly.graph_objects as go
import json
import requests
SPD_data = pd.read_csv('sample_2018_2019.csv',delimiter = ',')
SPD_data.sort_values(by='Report DateTime', ascending = True, inplace = True)
SPD_data['coordinates'] = SPD_data[['Latitude', 'Longitude']].values.tolist()
SPD_data = SPD_data.iloc[:100000,:]
def crimes_in_radius_dates(coord, radius, start_date, end_date):
df = SPD_data
df['Report DateTime']=pd.to_datetime(df['Report DateTime']).dt.date
date_mask = (pd.to_datetime(df['Report DateTime']) >= start_date) & (pd.to_datetime(df['Report DateTime']) <= end_date)
dff = df[date_mask]
result = [point_in_radius(value[0],value[1],coord[0],coord[1],radius)
for value in dff['coordinates']]
return dff[result]
def point_in_radius(lat1, lon1, lat2, lon2, radius):
# """
# Calculate the great circle distance between two points
# on the earth (specified in decimal degrees)
# """
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
    r = 3956  # Radius of the earth in miles (use 6371 for kilometers)
    return c * r <= int(radius)
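# Quick sanity check (hypothetical coordinates a few blocks apart in downtown Seattle;
# not part of the original app):
# point_in_radius(47.6062, -122.3321, 47.6097, -122.3331, 1)  # -> True, well under 1 mile apart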
def address_to_coord(address_string):
    result = address_string.replace(' ', '+')
    query = f'https://nominatim.openstreetmap.org/search?q={result}&format=geojson'
    response = requests.get(query)
    return response.json()
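# Sketch of pulling (lat, lon) out of the GeoJSON response (assumes at least one match is
# returned; GeoJSON stores coordinates in [lon, lat] order):
# feature = address_to_coord('600 4th Ave Seattle')['features'][0]
# lon, lat = feature['geometry']['coordinates']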
def crime_marker(coord,category,map):
colors = {'PROPERTY':'Blue','PERSON':'Red','SOCIETY':'#009933'}
feature_property = folium.FeatureGroup('PROPERTY')
feature_person = folium.FeatureGroup('PERSON')
feature_society = folium.FeatureGroup('SOCIETY')
group = {'PROPERTY':feature_property,'PERSON':feature_person,'SOCIETY':feature_society}
for x, y in zip(coord, category):
folium.CircleMarker(
location = x,
radius = 3,
popup = y,
color = colors[y],
fill = True,
fill_color = colors[y]
).add_to(group[y])
for key in group.keys():
group[key].add_to(map)
def crime_table(data,type, start, end):
df =data[data['Crime Against Category'] == type].sort_values('Report DateTime', ascending = True)
#df['date']=pd.to_datetime(df['Report DateTime']).dt.date
date_mask = (pd.to_datetime(df['Report DateTime']) >= start) & (pd.to_datetime(df['Report DateTime']) <= end)
return df[date_mask].groupby('Offense').count()['Report Number'].sort_values(ascending = False).reset_index()
def crime_trend_data(data,type, end_date):
df =data[data['Crime Against Category'] == type].sort_values('Report DateTime', ascending = True)
date_mask = (pd.to_datetime(df['Report DateTime']) <= end_date) & (pd.to_datetime(df['Report DateTime']) >= pd.to_datetime(end_date)-timedelta(days=180)) #selects only rows with certain timeframe
df = df[date_mask]
offense_names = df['Offense'].unique()
dff = pd.DataFrame()
fig = go.Figure()
for o_type in offense_names:
df_off = df[df['Offense'] == o_type]
df_off['Report DateTime'] = pd.to_datetime(df_off['Report DateTime'])
df_off = df_off.resample('M', on='Report DateTime').count()['Report Number'].reset_index()
fig.add_trace(go.Scatter(x =df_off['Report DateTime'], y = df_off['Report Number'], mode='lines+markers', name = o_type))
fig.update_layout(legend = dict(
yanchor = "top",
y = -0.5,
xanchor= "left",
x = 0.0
))
return fig
def slider_marks(marks,start_date):
maxmarks=marks
m1date=start_date
    datelist = pd.date_range(m1date, periods=maxmarks, freq='M')
"""Unit tests for functions in src/util.py"""
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from src.util import (
StanInput,
make_columns_lower_case,
one_encode,
stanify_dict,
)
@pytest.mark.parametrize(
"s_in,expected",
[
(
pd.Series(["8", 1, "????"], index=["a", "b", "c"]),
pd.Series([1, 2, 3], index=["a", "b", "c"]),
),
(
            pd.Series([1, "????", "????"], index=["a", "b", "c"]),
import turtle
import pandas as pd
screen = turtle.Screen()
screen.title("U.S. States Game")
image = "us_states_map.gif"
ALIGNMENT = 'center'
FONT = ("Courier-Bold", 15, "normal")
screen.addshape(image)
turtle.shape(image)
guessed_states = []
states_data = pd.read_csv("us_states.csv")
all_states = states_data.state.to_list()
while len(guessed_states) < 50:
answer = screen.textinput(title = f"{len(guessed_states)}/50 State correct",
prompt = "Guess a state's name").title()
if answer == "Exit":
missing_states = [state for state in all_states if state not in guessed_states]
# for state in all_states:
# if state not in guessed_states:
# missing_states.append(state)
        missing_states_df = pd.DataFrame(missing_states)
from itertools import chain
import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_number
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
from pandas.tests.apply.common import (
frame_transform_kernels,
series_transform_kernels,
)
@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"])
@pytest.mark.parametrize(
"args,kwds",
[
pytest.param([], {}, id="no_args_or_kwds"),
pytest.param([1], {}, id="axis_from_args"),
pytest.param([], {"axis": 1}, id="axis_from_kwds"),
pytest.param([], {"numeric_only": True}, id="optional_kwds"),
pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),
],
)
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how):
if len(args) > 1 and how == "agg":
request.node.add_marker(
pytest.mark.xfail(
raises=TypeError,
reason="agg/apply signature mismatch - agg passes 2nd "
"argument to func",
)
)
result = getattr(float_frame, how)(func, *args, **kwds)
expected = getattr(float_frame, func)(*args, **kwds)
tm.assert_series_equal(result, expected)
def test_with_string_args(datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
@pytest.mark.parametrize("op", ["mean", "median", "std", "var"])
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_np_reducer(float_frame, op, how):
# GH 39116
float_frame = DataFrame({"a": [1, 2], "b": [3, 4]})
result = getattr(float_frame, how)(op)
# pandas ddof defaults to 1, numpy to 0
kwargs = {"ddof": 1} if op in ("std", "var") else {}
expected = Series(
getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"]
)
@pytest.mark.parametrize("how", ["transform", "apply"])
def test_apply_np_transformer(float_frame, op, how):
# GH 39116
# float_frame will _usually_ have negative values, which will
# trigger the warning here, but let's put one in just to be sure
float_frame.iloc[0, 0] = -1.0
warn = None
if op in ["log", "sqrt"]:
warn = RuntimeWarning
with tm.assert_produces_warning(warn):
result = getattr(float_frame, how)(op)
expected = getattr(np, op)(float_frame)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", True),
("any", True),
],
),
),
)
def test_agg_cython_table_series(series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", Series([np.nan, 1, 2, 6])),
("cumsum", Series([np.nan, 1, 3, 6])),
],
),
tm.get_cython_table_params(
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
),
),
)
def test_agg_cython_table_transform_series(series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"df, func, expected",
chain(
tm.get_cython_table_params(
DataFrame(),
[
("sum", Series(dtype="float64")),
("max", Series(dtype="float64")),
("min", Series(dtype="float64")),
("all", Series(dtype=bool)),
("any", Series(dtype=bool)),
("mean", Series(dtype="float64")),
("prod", Series(dtype="float64")),
("std", Series(dtype="float64")),
("var", Series(dtype="float64")),
("median", Series(dtype="float64")),
],
),
tm.get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]),
[
("sum", Series([1.0, 3])),
("max", Series([1.0, 2])),
("min", Series([1.0, 1])),
("all", Series([True, True])),
("any", Series([True, True])),
("mean", Series([1, 1.5])),
("prod", Series([1.0, 2])),
("std", Series([np.nan, 0.707107])),
("var", Series([np.nan, 0.5])),
("median", Series([1, 1.5])),
],
),
),
)
def test_agg_cython_table_frame(df, func, expected, axis):
# GH 21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = df.agg(func, axis=axis)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"df, func, expected",
chain(
tm.get_cython_table_params(
DataFrame(), [("cumprod", DataFrame()), ("cumsum", DataFrame())]
),
tm.get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]),
[
("cumprod", DataFrame([[np.nan, 1], [1, 2]])),
("cumsum", | DataFrame([[np.nan, 1], [1, 3]]) | pandas.DataFrame |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
        start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 11:30:17 2019
@author: max
"""
#!/usr/bin/env python3
import os
import sys
#import seaborn as sns
import numpy as np
import pandas as pd
import scipy.stats as stats
#import matplotlib.pyplot as plt
import argparse
import plotly
#import plotly.plotly as py
import plotly.graph_objs as go
#init_notebook_mode(connected=True)
#import statsmodels.api as sm
#from xattr import xattr
import time
#import subprocess
from plotly import __version__
#from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
print(__version__) # requires version >= 1.9.0
#from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.formula.api import ols
sys.path.append(os.path.realpath(__file__))
import load_data as exp
import statistics
#from load_data import KnockdownFeatures_class as kd
'''
This script takes a list of experiment folders and a list of knockdowns as input.
These are passed to the load_data.py module.
It contains several statistical functions to analyze the data object created by load_data.py.
It can be run from the command line and offers options to run a Bonferroni-corrected
t-test comparing each knockdown with its respective control and to print figures for all
features with significance annotations. Figures can be based either on the raw data or on the z_score.
In addition, it can print .csv files of median feature values to be fed
into the PCA analysis app.
Dependencies:
KnockdownFeatures_class.py
load_data.py
'''
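# Example invocation (illustrative only; the script name is hypothetical, the flags match the
# argparse options defined below, and the path/knockdown names mirror the commented example):
#   python analysis_stats.py -d /Users/max/Desktop/Office/test/data_test/SiRNA_31/segmented/ \
#       -k CTRL ARHGAP17 DOCK10 ITSN1 -f z_score -t True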
#%%
#add the paths to the experiment folders
# =============================================================================
# path=['/Users/max/Desktop/Office/test/data_test/SiRNA_31/segmented/']
# #add the knockdowns you want to load
# knockdowns=['CTRL', 'ARHGAP17', 'DOCK10', 'ITSN1']
# =============================================================================
def parseArguments():
# Define the parser and read arguments
parser = argparse.ArgumentParser(description='a function including various statistical tools to be applied to the data objects.')
parser.add_argument('-d','--dir', nargs='+', help='add the directories with spaces between them', required=True)
parser.add_argument('-k','--kd', nargs='+', help='add the knockdown folder names with spaces between them', required=True)
parser.add_argument('-t','--TSNE', help='set True for TSNE output, \'z_score\' for TSNE output as z_score \
\'long\' for long format csv with z_score values. leave empty to skip this output', required=False)
parser.add_argument('-f','--figures', help='set True for figure printing of raw data, z_score for figure printing of z_scores.set to featureplot for featureplots leave empty to skip this output ', required=False)
args = parser.parse_args()
return(args)
#%%
# =============================================================================
# def boxplot(feature, value):
# #makes a boxplot of the values from one feature, grouped by knockdown
# ax=sns.catplot(x='experiment', y=value, hue='KD',\
# data=data.grouped_features[feature], kind='box')
# sig, alpha=calc_Bonferroni(feature)
# plot_median=data.grouped_features[feature].groupby(['experiment', 'KD'])[value].median()
# nobs=[sig[x][1] for x in sig]
# axes = ax.axes.flatten()
# axes[0].set_xlabel(feature)
# pos=range(len(nobs))
# sns.FacetGrid.set_xticklabels(ax, nobs)
# plt.show()
# # ax=ax.get_figure()
# plt.close()
# return ax
# =============================================================================
#axes[1].set_title("External")
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
def median(a, l, r):
n = r - l + 1
n = (n + 1) // 2 - 1
return n + l
def featureplot(KD, value):
#to_tag=False
# =============================================================================
# DEFAULT_PLOTLY_COLORS=['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
# 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
# 'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
# 'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
# 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
# =============================================================================
#if wide feature attribute isnt already existing in data it is calling
#pca_feature_data and pca_attribute_data()
if hasattr(data, 'wide_feature')==False:
data.pca_feature_data(value=value)
data.pca_attribute_data()
#creates a variable of data to plot
if hasattr(data, 'KD_plot_data')==False:
KD_plot_data=data.wide_feature
#adds the column for the knockdowns from the attribute_data
KD_plot_data['KD']=data.wide_attribute['knockdown']
#melts it to long format
KD_plot_data=pd.melt(KD_plot_data, id_vars='KD')
data.KD_plot_data=KD_plot_data.rename(columns={'variable':'feature', 'value':value})
to_plot=data.KD_plot_data[(data.KD_plot_data['KD']==KD)]
# =============================================================================
# z_score_mask=(data.grouped_features[feature]['KD']!='CTRL')
# #excluding the control from plots showing the z_score
# if value == 'z_score':
# x_data=list(data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD']).groups.keys())
# y_index=data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD'])[value]
# else:
# =============================================================================
#gets the keys to the groups for indexing
x_data=list(to_plot.groupby(['feature']).groups.keys())
#gets a group object the groups are referring to
y_index=to_plot.groupby(['feature'])[value]
#y_data=data.grouped_features[feature].iloc[list(y_index.groups[x_data[0]])]['value']
#y_data=data.grouped_features[feature].groupby(['experiment', 'KD']).groups[x_data[1]]
traces=[]
#Q3=[]
#rescale_values=[]
#lower_rescale_values=[]
#colour_dict={}
# =============================================================================
# for enum, kd in enumerate(data.knockdowns):
# if enum >= len(DEFAULT_PLOTLY_COLORS):
# enum=0
# #making a colour dictionary, to give each box its own colour based on the knockdown group
# if kd not in colour_dict.keys():
# colour_dict.update({kd:DEFAULT_PLOTLY_COLORS[enum]})
# =============================================================================
#sig, alpha=calc_Bonferroni(feature)
#https://stackoverflow.com/questions/26536899/how-do-you-add-labels-to-a-plotly-boxplot-in-python
for enum, xd in enumerate(x_data):
#rescale_values.append(to_plot.loc[list(y_index.groups[xd])][value].std()+to_plot.loc[list(y_index.groups[xd])][value].median())
#lower_rescale_values.append(-1*(to_plot.loc[list(y_index.groups[xd])][value].std())-to_plot.loc[list(y_index.groups[xd])][value].median())
#Q3.append(IQR(list(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value]), len(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value])))
traces.append(go.Box(
#list(y_index.groups[xd]) applies the index of one group to the grouped dataframe to obtain
# a list of indices for that group. This list of indeces is used to index the dataframe, and obtain
#the value column of it.
y=to_plot.loc[list(y_index.groups[xd])][value],
name=str(xd),
#adds the points for each value next to the box
boxpoints=False,
#boxpoint='all',
jitter=0.5,
whiskerwidth=0.2,
marker=dict(
size=2,
#color=colour_dict[xd[1]]
),
line=dict(width=1),
))
# =============================================================================
# if value=='z_score':
# lower_limit=3*statistics.median(lower_rescale_values)
# else:
# lower_limit=0
# upper_limit=4*statistics.median(rescale_values)
# =============================================================================
layout = go.Layout(
boxgap=0,
boxgroupgap=0,
title=KD,
autosize=True,
yaxis=dict(
#autorange=True,
showgrid=True,
zeroline=True,
dtick=5,
gridcolor='rgb(0, 0, 0)',
gridwidth=1,
zerolinecolor='rgb(0, 0, 0)',
zerolinewidth=2,
range=[-5, 5]
# automargin=True,
),
# =============================================================================
# margin=dict(
# l=40,
# r=30,
# b=80,
# t=3*max(Q3),
# ),
# =============================================================================
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)',
showlegend=False
)
fig = go.Figure(data=traces, layout=layout)
#counts the number of observations for each group
# =============================================================================
# count=to_plot.groupby(['feature'])[value].count()
# for enum, xd in enumerate(x_data):
# sig_index=xd[0]+xd[1]
# #gets the number of observations for the current box
# n=str(count[xd])
#
#
# #getting the title for each column the following way:
# #getting the sig_index by concatonating the two strings of xd
# #and using this as the key for the bonferrony corrected t_test
# #to obtain the second value, which is the p value
# try:
# p=round(sig[sig_index][1], 4)
# #adds a star if the p value is significant
# if p < alpha:
# p=str(p)
# p=p+'*'
# #marks the plot as being significant
# to_tag=True
# p=str(p)
# #exception, if no p value exists (i.e. for control)
# except:
# p=''
#
# fig['layout']['annotations']+=tuple([dict(
# #positions on x axis based on current box
# x=enum,
# #positions text based on y axis based on the median of current box
# y=to_plot.groupby(['feature'])[value].median(),
# yref='y',
# xref='x',
# text='p: {}<br>n: {}'.format('NA', n),
# showarrow=True,
# #determines the length of the arrow for the annotation text
# arrowhead=0,
# ax=0,
# ay=-10
# )])
# =============================================================================
# =============================================================================
# if to_tag==True:
# #saves the plot in a different folder, if one or more groups show significance
# sig_folder=os.path.join(path[0], 'significant')
# createFolder(sig_folder)
# file='{}/{}.html'.format(sig_folder,KD)
# else:
# =============================================================================
file='{}{}.html'.format(path[0],KD)
plotly.offline.plot(fig, filename = file, image='svg', auto_open=False)
return fig
def loop_featureplot(value):
'''
creates a graph for each knockdown
'''
for k in data.knockdowns:
featureplot(k, value)
time.sleep(1)
def pyplot(feature, value):
to_tag=False
DEFAULT_PLOTLY_COLORS=['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
z_score_mask=(data.grouped_features[feature]['KD']!='CTRL')
#excluding the control from plots showing the z_score
if value == 'z_score':
x_data=list(data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD']).groups.keys())
y_index=data.grouped_features[feature][z_score_mask].groupby(['experiment', 'KD'])[value]
else:
#gets the keys to the groups for indexing
x_data=list(data.grouped_features[feature].groupby(['experiment', 'KD']).groups.keys())
#gets a group object the groups are referring to
y_index=data.grouped_features[feature].groupby(['experiment', 'KD'])[value]
#y_data=data.grouped_features[feature].iloc[list(y_index.groups[x_data[0]])]['value']
#y_data=data.grouped_features[feature].groupby(['experiment', 'KD']).groups[x_data[1]]
traces=[]
#Q3=[]
rescale_values=[]
lower_rescale_values=[]
colour_dict={}
for enum, kd in enumerate(knockdowns):
if enum >= len(DEFAULT_PLOTLY_COLORS):
enum=0
#making a colour dictionary, to give each box its own colour based on the knockdown group
if kd not in colour_dict.keys():
colour_dict.update({kd:DEFAULT_PLOTLY_COLORS[enum]})
sig, alpha=calc_Bonferroni(feature)
#https://stackoverflow.com/questions/26536899/how-do-you-add-labels-to-a-plotly-boxplot-in-python
for enum, xd in enumerate(x_data):
rescale_values.append(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].std()+data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].median())
lower_rescale_values.append(-1*(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].std())-data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].median())
#Q3.append(IQR(list(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value]), len(data.grouped_features[feature].iloc[list(y_index.groups[xd])][value])))
traces.append(go.Box(
#list(y_index.groups[xd]) applies the index of one group to the grouped dataframe to obtain
# a list of indices for that group. This list of indeces is used to index the dataframe, and obtain
#the value column of it.
y=data.grouped_features[feature].loc[list(y_index.groups[xd])][value],
name=str(xd),
#adds the points for each value next to the box
boxpoints='all',
#boxpoint='all',
jitter=0.5,
whiskerwidth=0.2,
marker=dict(
size=2,
color=colour_dict[xd[1]]
),
line=dict(width=1),
))
if value=='z_score':
lower_limit=-8
upper_limit=8
else:
lower_limit=0
upper_limit=4*statistics.median(rescale_values)
layout = go.Layout(
title=feature,
autosize=True,
yaxis=dict(
#autorange=True,
showgrid=True,
zeroline=True,
dtick=5,
gridcolor='rgb(255, 255, 255)',
gridwidth=1,
zerolinecolor='rgb(255, 255, 255)',
zerolinewidth=2,
range=[lower_limit, upper_limit]
# automargin=True,
),
# =============================================================================
# margin=dict(
# l=40,
# r=30,
# b=80,
# t=3*max(Q3),
# ),
# =============================================================================
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)',
showlegend=True
)
fig = go.Figure(data=traces, layout=layout)
#counts the number of observations for each group
count=data.grouped_features[feature].groupby(['experiment', 'KD'])[value].count()
for enum, xd in enumerate(x_data):
sig_index=xd[0]+xd[1]
#gets the number of observations for the current box
n=str(count[xd])
#getting the title for each column the following way:
#getting the sig_index by concatonating the two strings of xd
#and using this as the key for the bonferrony corrected t_test
#to obtain the second value, which is the p value
try:
p=round(sig[sig_index][1], 4)
#adds a star if the p value is significant
if p < alpha:
p=str(p)
p=p+'*'
#marks the plot as being significant
to_tag=True
p=str(p)
#exception, if no p value exists (i.e. for control)
except:
p=''
fig['layout']['annotations']+=tuple([dict(
#positions on x axis based on current box
x=enum,
#positions text based on y axis based on the median of current box
y=data.grouped_features[feature].iloc[list(y_index.groups[xd])][value].median(),
yref='y',
xref='x',
text='p: {}<br>n: {}'.format(p, n),
showarrow=True,
#determines the length of the arrow for the annotation text
arrowhead=0,
ax=0,
ay=-10
)])
if to_tag==True:
#saves the plot in a different folder, if one or more groups show significance
sig_folder=os.path.join(path[0], 'significant')
createFolder(sig_folder)
file='{}/{}.html'.format(sig_folder,feature)
else:
file='{}{}.html'.format(path[0],feature)
plotly.offline.plot(fig, filename = file, image='svg', auto_open=False)
return fig, x_data
def loop_graph(function, value):
'''
creates a graph for each feature
'''
for f in data.features:
function(f, value)
time.sleep(1)
#%%
#data.grouped_features[feature].boxplot('z_score', by='KD', figsize=(12, 8))
#either computes the MAD (robust==True),
#or the standard deviation(robust==False)
def MAD_robust(x, robust=True):
if robust==True:
med=np.median(x)
dif=[np.abs(i-med) for i in x]
return np.median(dif)
else:
return np.std(x)
#either computes the median (robust==True), or the mean (robust==False)
def Mean_robust(x, robust=True):
if robust==True:
return np.median(x)
else:
return np.mean(x)
def calc_mean_features():
'''
calculates the mean values of each feature grouped by timepoint and by experiment
excluding the ctrl
'''
mean_features=[]
for f in data.features:
temp=pd.DataFrame()
temp=data.grouped_features[f][data.grouped_features[f]['KD']!='CTRL'].groupby(['timepoint', 'experiment', 'KD'], as_index=False).agg({'value':[MAD_robust, Mean_robust]})
temp['feature']=f
mean_features.append(temp)
mean_features = pd.concat(mean_features, axis=0, sort=True)
mean_features.columns = ["_".join(x) for x in mean_features.columns.ravel()]
mean_features=mean_features.reset_index(drop=True)
return mean_features
def calc_mean_ctrl(all_features=False):
'''
calculates the mean values of each feature grouped by timepoint and by experiment
only for the ctrl
'''
#all features==False used for standard z_score. Only calculates the mean and standard deviation for the control
if all_features==False:
mean_ctrl=[]
for f in data.features:
temp=pd.DataFrame()
temp=data.grouped_features[f][data.grouped_features[f]['KD']=='CTRL'].groupby(['experiment'], as_index=False).agg({'value':[MAD_robust, Mean_robust]})
#temp=data.grouped_features[f][data.grouped_features[f]['KD']=='CTRL'].groupby(['timepoint', 'experiment'], as_index=False).agg({'value':[MAD_robust, Mean_robust]})
temp['feature']=f
mean_ctrl.append(temp)
mean_ctrl = pd.concat(mean_ctrl, axis=0, sort=True)
mean_ctrl.columns = ["_".join(x) for x in mean_ctrl.columns.ravel()]
mean_ctrl=mean_ctrl.reset_index(drop=True)
#if all features==True used for internal z_score, computes the mean and standard deviation
#for all knockdowns
if all_features==True:
mean_ctrl=[]
for k in data.knockdowns:
for f in data.features:
                temp = pd.DataFrame()
"""Get output of fits into a format for UNITY"""
import os
import copy
import click
import pickle
import sncosmo
import numpy as np
import pandas as pd
from collections import defaultdict
from astropy import units as u
from astropy.io import ascii
from astropy.coordinates import SkyCoord
from scipy.linalg import block_diag
def calc_mbstar(model, coefs, z):
"""Calculates $m_B^*$ for a supernova model with the given coefficients
Args:
model (sncosmo.Model): sncosmo Model object to use in calculation
coefs (np.array): array of model coefficients
z (float): redshift of object
Returns:
mbstart (float): AB magnitude in the Bessell B-band for a supernova with
the given model coefficients
"""
model = copy.copy(model)
model.set(**dict(zip(model.source.param_names, coefs)))
model.set(z=0)
model.set(t0=0)
mag = model.bandmag(band='bessellb', time=0, magsys='ab')
return mag
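# Illustrative call (hypothetical coefficient values; assumes `coefs` is ordered like
# model.source.param_names, e.g. [x0, x1, c] for a SALT2 source):
# mbstar = calc_mbstar(sncosmo.Model(source='salt2'), [1e-5, 0.1, 0.02], z=0.05)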
def radectoxyz(RAdeg, DECdeg):
x = np.cos(DECdeg/(180./np.pi))*np.cos(RAdeg/(180./np.pi))
y = np.cos(DECdeg/(180./np.pi))*np.sin(RAdeg/(180./np.pi))
z = np.sin(DECdeg/(180./np.pi))
return np.array([x, y, z], dtype=np.float64)
def get_dz(RAdeg, DECdeg):
dzCMB = 371.e3/299792458. # NED
#http://arxiv.org/pdf/astro-ph/9609034
#CMBcoordsRA = 167.98750000 # J2000 Lineweaver
#CMBcoordsDEC = -7.22000000
CMBcoordsRA = 168.01190437 # NED
CMBcoordsDEC = -6.98296811
CMBxyz = radectoxyz(CMBcoordsRA, CMBcoordsDEC)
inputxyz = radectoxyz(RAdeg, DECdeg)
dz = dzCMB*np.dot(CMBxyz, inputxyz)
return dz
def get_zCMB(RAdeg, DECdeg, z_helio):
dz = -get_dz(RAdeg, DECdeg)
one_plus_z_pec = np.sqrt((1. + dz)/(1. - dz))
one_plus_z_CMB = (1 + z_helio)/one_plus_z_pec
return one_plus_z_CMB - 1.
def get_zhelio(RAdeg, DECdeg, z_CMB):
dz = -get_dz(RAdeg, DECdeg)
one_plus_z_pec = np.sqrt((1. + dz)/(1. - dz))
one_plus_z_helio = (1 + z_CMB)*one_plus_z_pec
return one_plus_z_helio - 1.
@click.command()
@click.option('-m', '--model', default='snemo7', type=click.Choice(['salt2', 'snemo2', 'snemo7']))
@click.option('-e', '--err_floor', default=0., help='Desired error floor as fraction of maximum band flux.')
@click.option('-p', '--prefix', default='')
def main(model, err_floor, prefix):
print(model, err_floor, prefix)
err_floor_int = int(err_floor*100)
RESULTS_DIR = './results_mw_reddening_mcmc'
JLA_FIT_DIR = os.path.join(RESULTS_DIR, 'jla_{}_{:02d}/'.format(model, err_floor_int))
CSP_FIT_DIR = os.path.join(RESULTS_DIR, 'csp_{}_{:02d}/'.format(model, err_floor_int))
PS_FIT_DIR = os.path.join(RESULTS_DIR, 'ps_{}_{:02d}/'.format(model, err_floor_int))
if model=='snemo7':
n_props = 9
else:
n_props = 4
MODEL = sncosmo.Model(source=model)
OUT_PATH = prefix + '{}_{:02d}.pkl'.format(model, err_floor_int)
# Read pickle files from fits and standardize names to check for duplicates
fits = {}
for fit_dir in [JLA_FIT_DIR, PS_FIT_DIR, CSP_FIT_DIR]:
lc_source = fit_dir.split('/')[-2].split('_')[0]
print('Reading fit results from {}'.format(fit_dir))
for fname in os.listdir(fit_dir):
path = os.path.join(fit_dir, fname)
try:
name = 'SDSS{}'.format(int(fname.split('.')[0])) # SDSS SNe in JLA just have integer names
except:
name = fname.split('.')[0]
if name[:2] == 'sn': # CSP and JLA names use lowercase 'sn'
name = 'SN'+name[2:]
try:
if name in fits.keys():
duplicate_source = fits[name]['lc_source']
print(name+' duplicated in datasets {} and {}. Using {}'.format(duplicate_source, lc_source, lc_source))
fits[name] = pickle.load(open(path, 'rb'))
fits[name]['lc_source'] = lc_source
except IsADirectoryError:
continue
    fit_df = pd.DataFrame.from_dict(fits)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 25 20:15:17 2019
@author: marcelo
"""
try:
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
import pandas as pd
except Exception as e:
print(e)
    raise Exception('Some required modules were not installed...')
def fit_grid_search(x_train, y_train, out):
    '''
    Trains SVM models over a grid of parameter combinations.
    :x_train: ndarray
        Training samples of shape [n_samples, n_cols].
    :y_train: ndarray
        Class labels of the training samples, shape [n_samples,].
    :return: None.
    '''
param_grid = {
'C': [10, 100, 200],
'gamma': [0.001, 0.00001],
'kernel': ['rbf', 'poly', 'linear', 'sigmoid'],
'degree': [2, 3]
}
metrics = ['f1_macro', 'accuracy', 'precision_macro','recall_macro']
grid_search = GridSearchCV(estimator=SVC(), \
param_grid=param_grid, \
scoring=metrics, \
refit=False, \
cv=5, \
n_jobs=-1,
verbose=100)
grid_search.fit(x_train, y_train)
print(100 * '.')
print(grid_search.cv_results_)
    df_result = pd.DataFrame(grid_search.cv_results_)
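# Example call (hypothetical arrays; the `out` argument is presumably a label or path used
# when saving the results):
# fit_grid_search(x_train, y_train, out='svm_grid_results')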
import numpy as np
import pandas as pd
import pulp
from laptimize.curve_approximation import CurveApproximator
from laptimize.log import LogFactory
class LAPModel(object):
"""solve the linear approximated LP problem and sub problems"""
def __init__(self, name='nlp_problem'):
self.logger = LogFactory.get_logger()
self.lp_variables = dict()
        self.segment = pd.DataFrame()
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import pandas as pd
from src.utils.config import Config
from src.features import build_features
from dotenv import find_dotenv, load_dotenv
from sklearn.manifold import TSNE
import umap
from sklearn.decomposition import PCA
import numpy as np
from sklearn.preprocessing import RobustScaler as rs
from sklearn.preprocessing import MinMaxScaler as mms
from sklearn.preprocessing import StandardScaler as sd
project_dir=Config.project_dir
def process_data():
labels= pd.read_csv(project_dir / "data/raw/labels.csv")
expression_data = pd.read_csv(project_dir / "data/raw/data.csv")
#rename and Merge labels and features
expression_data.rename({"Unnamed: 0":"sample"}, axis='columns', inplace =True)
labels.rename({"Unnamed: 0":"sample"}, axis='columns', inplace =True)
labled_expression_merged = pd.merge(labels,expression_data,on="sample")
# save
expression_data=expression_data.drop("sample",axis=1)
expression_data.to_csv(project_dir/ "data/processed/expression_data_original.csv")
labels=labels.drop("sample",axis=1)
labels.to_csv(project_dir/ "data/processed/labels.csv")
labled_expression_merged.to_csv(project_dir/ "data/processed/merged_expression_dataset.csv", index=True)
"""[Robust scaling ]
Robust rescaling the expression levels of each gene,
applying the formula :
rescaled = (gene_expression - median(gene_expression)) / IQR(gene_expression) where IQR stands for Inter Quartile Range.
"""
expression_data_centered = rs().fit_transform(expression_data)
df_expression_data_centered = pd.DataFrame(expression_data_centered,columns=expression_data.columns)
df_expression_data_centered.to_csv(project_dir/ "data/processed/expression_data_centerted.csv")
"""[standard scaling ]
"""
expression_data_standardized = sd().fit_transform(expression_data)
df_expression_data_standardized = pd.DataFrame(expression_data_standardized,columns=expression_data.columns)
df_expression_data_standardized.to_csv(project_dir/ "data/processed/expression_data_standardized.csv")
y = labels['Class'].values
true_labels = np.array([Config.labels_map[element] for element in y])
df_true_labels = pd.DataFrame(true_labels,columns=["Class"])
df_true_labels.to_csv(project_dir/ "data/processed/true_labels.csv")
expression_level_5000_HGV , features_5000_HGV= build_features.top_k_variance(
expression_data.values,
k=1000,
names= expression_data.columns
)
#--------------------- data reduction -----------------------#
pca_reducer = PCA(n_components=2)
pca_reducer.fit(expression_data )
pc = pca_reducer.transform(expression_data )
X_tsne = TSNE(n_components=2).fit_transform(expression_data)
UMAP_COMPONENTS_REDUCTION = 2
UMAP_COMPONENTS_FEATURES = 20
UMAP_EPOCHS = 2000
manifold_reducer = umap.UMAP(
n_components=UMAP_COMPONENTS_REDUCTION,
n_neighbors=200,
n_epochs=UMAP_EPOCHS,
metric='cosine',
min_dist=0.9)
manifold = manifold_reducer.fit_transform(expression_data)
# saving tranformed data
components= ["c1","c2"]
df_PCA =pd.DataFrame(pc,columns=components)
df_PCA.to_csv(Config.project_dir/ "data/transformed/PCA_reduction.csv")
df_PCA =pd.DataFrame(X_tsne,columns=components)
df_PCA.to_csv(Config.project_dir/ "data/transformed/TSNA_reduction.csv")
df_PCA =pd.DataFrame(manifold,columns=components)
df_PCA.to_csv(Config.project_dir/ "data/transformed/UMAP_reduction.csv")
# saving hvg
df_expression_level_5000_HGV =pd.DataFrame(expression_level_5000_HGV,columns=features_5000_HGV)
df_expression_level_5000_HGV.to_csv(Config.project_dir/ "data/transformed/expression_data_HVG_1000.csv")
def get_data(data_type:str):
"""
this function :
imports data
Args:
data_type (str): ["original","centered","standardized"] the type of data you want to import
Returns:
[tuple]: containing (the merged data , features , labels , true labels )
"""
merged_data= pd.read_csv(Config.data / f"processed/merged_expression_dataset.csv",index_col=0)
features=pd.read_csv(Config.data / f"processed/expression_data_{data_type}.csv",index_col=0)
labels=pd.read_csv(Config.data / f"processed/labels.csv",index_col=0)
true_labels=pd.read_csv(Config.data / f"processed/true_labels.csv",index_col=0)
return merged_data,features,labels,true_labels
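# Example usage (assumes the processed CSVs have already been written by process_data()):
# merged, features, labels, true_labels = get_data("standardized")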
def get_transformed_data():
"""
this function :
import reduced data
Args:
Returns:
[tuple]: containing (the merged data , features , labels , true labels )
"""
HGV= pd.read_csv(Config.data / f"transformed/expression_data_HVG_1000.csv",index_col=0)
PCA=pd.read_csv(Config.data / f"transformed/PCA_reduction.csv",index_col=0)
UMAP=pd.read_csv(Config.data / f"transformed/UMAP_reduction.csv",index_col=0)
    TSNA = pd.read_csv(Config.data / f"transformed/TSNA_reduction.csv", index_col=0)
import pandas as pd
from pathlib import Path
import os
import numpy as np
import datetime
from pickle_plotting import get_file_paths
import logarithmoforecast as lf
import holidays
def pickle_directory(datasets_dir, pickle_dir):
file_paths = os.listdir(datasets_dir)
sdp_series = {}
for path in file_paths:
number = Path(path).stem
print(number)
df = pd.read_csv(datasets_dir / path, header=4, sep=';', usecols=[0, 1, 2, 3, 4, 5], decimal=",")
# df = pd.read_csv(r"/home/joelhaubold/Dokumente/BADaten/FiN-Messdaten-LV_Spannung_Teil2/tmpFile-1492693540182.csv", header=4, sep=';', usecols=[0, 1, 2, 3, 4, 5], decimal=",")
df.drop(columns=['AliasName', 'Unit'])
df = df.set_index('TimeStamp')
df = df.sort_index()
sdp_list = df.ServiceDeliveryPoint.unique()
print(sdp_list)
for sdp in sdp_list:
df_sdp = df.loc[df.ServiceDeliveryPoint == sdp, :] # Slim the pd down here for less memory consumption?
if sdp in sdp_series:
combined_df = sdp_series.get(sdp)
combined_df = pd.concat([combined_df, df_sdp]).sort_index()
sdp_series[sdp] = combined_df
else:
sdp_series[sdp] = df_sdp
for key, value in sdp_series.items():
print(key)
if not os.path.exists(pickle_dir / key):
os.makedirs(pickle_dir / key)
value.index = pd.to_datetime(value.index)
pos1 = value.Description == 'Electric voltage momentary phase 1 (notverified)'
df_phase1 = value.loc[pos1, :]
pos2 = value.Description == 'Electric voltage momentary phase 2 (notverified)'
df_phase2 = value.loc[pos2, :]
pos3 = value.Description == 'Electric voltage momentary phase 3 (notverified)'
df_phase3 = value.loc[pos3, :]
# for phase in ['1', '2', '3']:
# if not os.path.exists('pickles/' + key + '/phase'+phase):
# os.makedirs('pickles/' + key + '/phase'+phase)
df_phase1.to_pickle(pickle_dir / key / "phase1")
df_phase2.to_pickle(pickle_dir / key / "phase2")
df_phase3.to_pickle(pickle_dir / key / "phase3")
# value.to_pickle(r"pickles/"+key+"/3PhasesDF")
def add_help_data(pickle_dir=Path('pickles')):
file_paths = get_file_paths(pickle_dir)
print(file_paths)
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
print("Opened pickle")
phase_values = pd.DataFrame()
for i, df_p in enumerate(df_phases):
df_p.drop(columns=['Unit', 'AliasName'], inplace=True)
phase = 'p' + str(i + 1)
phase_values[phase] = df_p.Value
for df_p in df_phases:
df_p['row_dif'] = df_p.Value.diff()
print("Created help values")
np.diff(phase_values.values)
phase_values['max_dif'] = phase_values.apply(
lambda row: max(abs(row['p1'] - row['p2']), abs(row['p1'] - row['p3']),
abs(row['p2'] - row['p3'])), axis=1)
print("Calculated help data")
for df_p in df_phases:
df_p['phase_dif'] = phase_values['max_dif']
print("Assigned help data")
for i, df_p in enumerate(df_phases):
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(i + 1)))
def update_trafo(pickle_dir=Path('pickles')):
# pd.options.mode.chained_assignment = None
file_paths = get_file_paths(pickle_dir)
print(file_paths)
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
print("Opened pickle")
df_row_difs = pd.DataFrame()
for p, df_p in enumerate(df_phases):
df_p['row_dif'] = df_p.Value.diff() / df_p.Value.index.to_series().diff().dt.total_seconds()
df_row_difs[str(p)] = df_p['row_dif']
df_row_difs.loc[True ^ (((df_row_difs['0'] >= 0) & (df_row_difs['1'] >= 0) & (df_row_difs['2'] >= 0)) | (
(df_row_difs['0'] < 0) & (df_row_difs['1'] < 0) & (df_row_difs['2'] < 0)))] = 0
df_row_difs = df_row_difs.abs()
for df_p in df_phases:
# df_p['trafo'] = min(df_phases[0]['row_dif'].abs(), df_phases[1]['row_dif'].abs(), df_phases[2]['row_dif'].abs())
df_p['trafo'] = df_row_difs.min(axis=1)
print("Assigned help data")
for i, df_p in enumerate(df_phases):
# print(df_p)
df_p.to_pickle(path / ("h_phase" + str(i + 1)))
def add_seasonal_data(pickle_dir=Path('pickles')):
seasonal_data = pd.DataFrame()
file_paths = get_file_paths(pickle_dir)
print(file_paths)
day = pd.Timedelta('1d')
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p))[['Value']], ['1', '2', '3']))
weekday_dfs_phases = [[None for x in range(7)] for y in range(3)]
min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
for p, df_p in enumerate(df_phases):
for start_time in pd.date_range(min_date, max_date, freq='d'):
end_time = start_time + day
df_p_day = df_p.loc[start_time:end_time]
df_p_day_med = df_p_day.resample('30s').median().rename(columns={'Value': str(start_time.date())})
df_p_day_med.index = df_p_day_med.index.time
weekday = start_time.date().weekday()
# print(weekday_dfs_phases[p][weekday])
if weekday_dfs_phases[p][weekday] is None:
weekday_df = df_p_day_med
weekday_dfs_phases[p][weekday] = weekday_df
else:
weekday_df = weekday_dfs_phases[p][weekday]
weekday_df = weekday_df.join(df_p_day_med, how='outer')
weekday_dfs_phases[p][weekday] = weekday_df
print("Split DF")
for p, df_weekdays in enumerate(weekday_dfs_phases):
for w, df in enumerate(df_weekdays):
df['med'] = df.median(axis=1)
# print(df)
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
print(df_phases_h)
for p, df_p in enumerate(df_phases_h):
print(p)
df_weekdays = weekday_dfs_phases[p]
df_p['SeasDif'] = df_p.apply(lambda row: (row['Value'] - df_weekdays[row.name.weekday()].loc[
(row.name - datetime.timedelta(seconds=row.name.second % 30,
microseconds=row.name.microsecond)).time()]['med']), axis=1)
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(p + 1)))
def add_new_seasonal_data(pickle_dir=Path('pickles')):
file_paths = get_file_paths(pickle_dir)
for path in file_paths:
station_season = pd.read_pickle(pickle_dir / (path + 'season_aggregation'))
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
for p, df_p in enumerate(df_phases):
df_p.drop(labels='SeasDif', inplace=True, errors='ignore')
print(p)
print(df_p)
v1s = []
print(station_season)
print(station_season.sort_index())
for index, row in df_p.iterrows():
print(row['Value'])
print(index)
print(index - datetime.timedelta(seconds=index.second % 30,
microseconds=index.microsecond))
print(station_season.loc[index - datetime.timedelta(seconds=index.second % 30,
microseconds=index.microsecond)])
v1 = row['Value'] - station_season.loc[index - datetime.timedelta(seconds=index.second % 30,
microseconds=index.microsecond)]
print(v1)
v1s.append(v1)
df_p['SeasDif'] = v1s
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(p + 1)))
def add_cross_station_data(pickle_dir=Path('pickles')):
station_avgs = pd.read_pickle(pickle_directory / "meanStationValues")
file_paths = get_file_paths(pickle_dir)
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
for p, df_p in enumerate(df_phases):
print(p)
print(df_p)
v1s = []
for index, row in df_p.iterrows():
v1 = row['Value'] - station_avgs.loc[index - datetime.timedelta(seconds=index.second % 30,
microseconds=index.microsecond)]
v1s.append(v1)
df_p['StationDif'] = v1s
# df_p.apply(lambda row:print(row), axis=1)
# df_p['StationDif'] = df_p.apply(lambda row: (row['Value'] - station_avgs.loc[
# (row.name - datetime.timedelta(seconds=row.name.second % 30,
# microseconds=row.name.microsecond)).time()]), axis=1)
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(p + 1)))
def create_mean_pickle(pickle_dir=Path('pickles')):
station_avgs = pd.DataFrame()
file_paths = get_file_paths(pickle_dir)
print(file_paths)
day = pd.Timedelta('1d')
for path in file_paths:
station_name = path
print(path)
path = pickle_dir / Path(path)
df_phases = pd.DataFrame()
for p, df_p in enumerate(list(map(lambda p: pd.read_pickle(path / ("phase" + p))[['Value']], ['1', '2', '3']))):
df_phases = df_phases.join(other=df_p.rename(columns={'Value': 'ValueP' + str(p + 1)}), how='outer')
df_phases = df_phases.resample('30s').mean()
df_phases[station_name] = df_phases.mean(axis=1)
# print(df_phases[[station_name]])
# print(station_avgs)
station_avgs = station_avgs.join(df_phases[[station_name]], how='outer')
df_phases[[station_name]].to_pickle(pickle_dir / (str(station_name) + 'aggregation'))
station_avgs = station_avgs.mean(axis=1)
print(station_avgs)
station_avgs.to_pickle(pickle_dir / 'meanStationValues')
def create_mean_street_pickles(pickle_dir=Path('pickles')):
station_avgs = pd.DataFrame()
file_paths = get_file_paths(pickle_dir)
print(file_paths)
day = pd.Timedelta('1d')
for path in file_paths:
station_name = path
print(path)
path = pickle_dir / Path(path)
df_phases = pd.DataFrame()
for p, df_p in enumerate(list(map(lambda p: pd.read_pickle(path / ("phase" + p))[['Value']], ['1', '2', '3']))):
df_phases = df_phases.join(other=df_p.rename(columns={'Value': 'ValueP' + str(p + 1)}), how='outer')
df_phases = df_phases.resample('30s').mean()
df_phases[station_name] = df_phases.mean(axis=1)
station_avgs = station_avgs.join(df_phases[[station_name]], how='outer')
station_avgs = station_avgs.mean(axis=1)
print(station_avgs)
station_avgs.to_pickle(pickle_dir / 'meanStationValues')
def create_mean_season_pickle(pickle_dir=Path('pickles')):
df_mean_season = pd.Series()
    df_mean_pickle = pd.read_pickle(pickle_dir / 'meanStationValues')
# Import the file and display the first rows:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv(r'D:\projet_energie.csv', sep=';', low_memory=False)
df.head()
# Create "Date_datetime" as a datetime column:
from datetime import datetime as dt
df["Date_datetime"]=pd.to_datetime(df.Date)
df.head()
# Convert "Heure" to the hour of day
from datetime import datetime as dt
df["Heure"]=df.Heure.astype("datetime64")
df["Heure"]=df.Heure.dt.hour
df.head()
# Display the dataset columns
df.columns
# Inspect the column data types
df.dtypes
# Drop the "Nature" variable
df=df.drop("Nature", axis=1)
df.head()
# Create "Date_datetime" as a datetime column:
from datetime import datetime as dt
df["Date_datetime"]=pd.to_datetime(df.Date)
df.head()
# Convert the "Heure" column to Datetime format (hour of day)
from datetime import datetime as dt
df["Heure"]=df.Heure.astype("datetime64")
df["Heure"]=df.Heure.dt.hour
df.head()
# Convert the "TCH" variables to float
df["TCH Thermique (%)"]=pd.to_numeric(df["TCH Thermique (%)"],errors="coerce")
df["TCH Nucléaire (%)"]=pd.to_numeric(df["TCH Nucléaire (%)"],errors="coerce")
df["TCH Eolien (%)"]=pd.to_numeric(df["TCH Eolien (%)"],errors="coerce")
df["TCH Solaire (%)"]=pd.to_numeric(df["TCH Solaire (%)"],errors="coerce")
df["TCH Hydraulique (%)"]=pd.to_numeric(df["TCH Hydraulique (%)"],errors="coerce")
df["TCH Bioénergies (%)"]=pd.to_numeric(df["TCH Bioénergies (%)"],errors="coerce")
# Check that the conversions were applied
df.info()
# Drop the 01-01-2013 00:00 rows, which contain only NAs (= 11 fewer rows)
df=df.drop(df.index[0:12], axis=0)
df.head()
# Drop the "Pompage (MW)" variable
df=df.drop("Pompage (MW)", axis=1)
df.columns
# Drop the "Date - Heure" variable
df=df.drop("Date - Heure", axis=1)
df.head()
# Drop the 38 flux variables
df=df.drop(df.iloc[:,12:50], axis=1)
df.columns
# Check the dataset dimensions
df.shape
# Create the "Mois" (month) variable
from datetime import datetime as dt
df["Mois"]= | pd.to_datetime(df.Date) | pandas.to_datetime |
"""test_algo_api.py module."""
# from datetime import datetime, timedelta
import pytest
# import sys
# from pathlib import Path
import numpy as np
import pandas as pd # type: ignore
import string
import math
from typing import Any, List, NamedTuple
# from typing_extensions import Final
from ibapi.tag_value import TagValue # type: ignore
from ibapi.contract import ComboLeg # type: ignore
from ibapi.contract import DeltaNeutralContract
from ibapi.contract import Contract, ContractDetails
from scottbrian_algo1.algo_api import AlgoApp, AlreadyConnected, \
DisconnectLockHeld, ConnectTimeout, RequestTimeout, DisconnectDuringRequest
from scottbrian_algo1.algo_maps import get_contract_dict, get_contract_obj
from scottbrian_algo1.algo_maps import get_contract_details_obj
# from scottbrian_utils.diag_msg import diag_msg
# from scottbrian_utils.file_catalog import FileCatalog
import logging
logger = logging.getLogger(__name__)
###############################################################################
# TestAlgoAppConnect class
###############################################################################
class TestAlgoAppConnect:
"""TestAlgoAppConnect class."""
def test_mock_connect_to_ib(self,
algo_app: "AlgoApp"
) -> None:
"""Test connecting to IB.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
"""
verify_algo_app_initialized(algo_app)
# we are testing connect_to_ib and the subsequent code that gets
# control as a result, such as getting the first requestID and then
# starting a separate thread for the run loop.
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
# verify that algo_app is connected and alive with a valid reqId
verify_algo_app_connected(algo_app)
algo_app.disconnect_from_ib()
verify_algo_app_disconnected(algo_app)
def test_mock_connect_to_ib_with_timeout(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test connecting to IB.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
# we are testing connect_to_ib with a simulated timeout
logger.debug("about to connect")
with pytest.raises(ConnectTimeout):
algo_app.connect_to_ib("127.0.0.1",
mock_ib.PORT_FOR_REQID_TIMEOUT,
client_id=0)
# verify that algo_app is not connected
verify_algo_app_disconnected(algo_app)
assert algo_app.request_id == 0
def test_connect_to_ib_already_connected(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test connecting to IB.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
# first, connect normally to mock_ib
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_PAPER_TRADING,
client_id=0)
# verify that algo_app is connected
verify_algo_app_connected(algo_app)
# try to connect again - should get error
with pytest.raises(AlreadyConnected):
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_PAPER_TRADING,
client_id=0)
# verify that algo_app is still connected and alive with a valid reqId
verify_algo_app_connected(algo_app)
algo_app.disconnect_from_ib()
verify_algo_app_disconnected(algo_app)
def test_connect_to_ib_with_lock_held(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test connecting to IB with disconnect lock held.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
# obtain the disconnect lock
logger.debug("about to obtain disconnect lock")
algo_app.disconnect_lock.acquire()
# try to connect - should get error
with pytest.raises(DisconnectLockHeld):
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
# verify that algo_app is still simply initialized
verify_algo_app_initialized(algo_app)
# def test_real_connect_to_IB(self) -> None:
# """Test connecting to IB.
#
# Args:
# algo_app: instance of AlgoApp from conftest pytest fixture
# monkeypatch: pytest fixture
#
# """
# proj_dir = Path.cwd().resolve().parents[1] # back two directories
# test_cat = \
# FileCatalog({'symbols': Path(proj_dir / 't_datasets/symbols.csv')
# })
# algo_app = AlgoApp(test_cat)
# verify_algo_app_initialized(algo_app)
#
# # we are testing connect_to_ib and the subsequent code that gets
# # control as a result, such as getting the first requestID and then
# # starting a separate thread for the run loop.
# logger.debug("about to connect")
# connect_ans = algo_app.connect_to_ib("127.0.0.1", 7496, client_id=0)
#
# # verify that algo_app is connected and alive with a valid reqId
# assert connect_ans
# assert algo_app.run_thread.is_alive()
# assert algo_app.isConnected()
# assert algo_app.request_id == 1
#
# algo_app.disconnect_from_ib()
# assert not algo_app.run_thread.is_alive()
# assert not algo_app.isConnected()
###############################################################################
# connect disconnect verification
###############################################################################
def verify_algo_app_initialized(algo_app: "AlgoApp") -> None:
"""Helper function to verify the also_app instance is initialized.
Args:
algo_app: instance of AlgoApp that is to be checked
"""
assert len(algo_app.ds_catalog) > 0
assert algo_app.request_id == 0
assert algo_app.symbols.empty
assert algo_app.stock_symbols.empty
assert algo_app.response_complete_event.is_set() is False
assert algo_app.nextValidId_event.is_set() is False
assert algo_app.__repr__() == 'AlgoApp(ds_catalog)'
# assert algo_app.run_thread is None
def verify_algo_app_connected(algo_app: "AlgoApp") -> None:
"""Helper function to verify we are connected to ib.
Args:
algo_app: instance of AlgoApp that is to be checked
"""
assert algo_app.run_thread.is_alive()
assert algo_app.isConnected()
assert algo_app.request_id == 1
def verify_algo_app_disconnected(algo_app: "AlgoApp") -> None:
"""Helper function to verify we are disconnected from ib.
Args:
algo_app: instance of AlgoApp that is to be checked
"""
assert not algo_app.run_thread.is_alive()
assert not algo_app.isConnected()
###############################################################################
###############################################################################
# matching symbols
###############################################################################
###############################################################################
class ExpCounts(NamedTuple):
"""NamedTuple for the expected counts."""
sym_non_recursive: int
sym_recursive: int
stock_sym_non_recursive: int
stock_sym_recursive: int
class SymDfs:
"""Saved sym dfs."""
def __init__(self,
mock_sym_df: Any,
sym_df: Any,
mock_stock_sym_df: Any,
stock_sym_df: Any) -> None:
"""Initialize the SymDfs.
Args:
mock_sym_df: mock sym DataFrame
sym_df: symbol DataFrame
mock_stock_sym_df: mock stock symbol DataFrame
stock_sym_df: stock symbols dataFrame
"""
self.mock_sym_df = mock_sym_df
self.sym_df = sym_df
self.mock_stock_sym_df = mock_stock_sym_df
self.stock_sym_df = stock_sym_df
class TestAlgoAppMatchingSymbols:
"""TestAlgoAppMatchingSymbols class."""
def test_request_symbols_all_combos(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test request_symbols with all patterns.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
verify_algo_app_connected(algo_app)
algo_app.request_throttle_secs = 0.01
try:
for idx, search_pattern in enumerate(
mock_ib.search_patterns()):
exp_counts = get_exp_number(search_pattern, mock_ib)
# verify symbol table has zero entries for the symbol
logger.info("calling verify_match_symbols req_type 1 "
"sym %s num %d", search_pattern, idx)
algo_app.symbols = pd.DataFrame()
algo_app.stock_symbols = pd.DataFrame()
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=1)
logger.info("calling verify_match_symbols req_type 2 "
"sym %s num %d", search_pattern, idx)
algo_app.symbols = pd.DataFrame()
algo_app.stock_symbols = pd.DataFrame()
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=2)
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_request_symbols_zero_result(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test request_symbols with pattern that finds exactly 1 symbol.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
verify_algo_app_connected(algo_app)
algo_app.request_throttle_secs = 0.01
try:
exp_counts = ExpCounts(0, 0, 0, 0)
# verify symbol table has zero entries for the symbols
for idx, search_pattern in enumerate(
mock_ib.no_find_search_patterns()):
logger.info("calling verify_match_symbols req_type 1 "
"sym %s num %d", search_pattern, idx)
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=1)
logger.info("calling verify_match_symbols req_type 2 "
"sym %s num %d", search_pattern, idx)
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=2)
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_get_symbols_timeout(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test get_symbols gets timeout.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
try:
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
mock_ib.PORT_FOR_SIMULATE_REQUEST_TIMEOUT,
client_id=0)
verify_algo_app_connected(algo_app)
with pytest.raises(RequestTimeout):
algo_app.request_symbols('A')
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_get_symbols_disconnect(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test get_symbols gets disconnected while waiting.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
try:
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
mock_ib.
PORT_FOR_SIMULATE_REQUEST_DISCONNECT,
client_id=0)
verify_algo_app_connected(algo_app)
with pytest.raises(DisconnectDuringRequest):
algo_app.request_symbols('A')
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_get_symbols(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test get_symbols with pattern that finds no symbols.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
try:
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
verify_algo_app_connected(algo_app)
algo_app.request_throttle_secs = 0.01
sym_dfs = SymDfs(pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame())
# full_stock_sym_match_descs = pd.DataFrame()
# stock_symbols_ds = pd.DataFrame()
# full_sym_match_descs = pd.DataFrame()
# symbols_ds = pd.DataFrame()
# we need to loop from A to Z
for letter in string.ascii_uppercase:
logger.debug("about to verify_get_symbols for letter %s",
letter)
# full_stock_sym_match_descs, stock_symbols_ds,\
# full_sym_match_descs, symbols_ds = \
sym_dfs = verify_get_symbols(letter,
algo_app,
mock_ib,
sym_dfs)
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_get_symbols_with_connect_disconnect(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test get_symbols with pattern that finds no symbols.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
        sym_dfs = SymDfs(pd.DataFrame(),
                         pd.DataFrame(),
                         pd.DataFrame(),
                         pd.DataFrame())
import urllib.request as urlreq
import io,json
import pandas as pd
# ******************************************************************************************************************************************
def download_smiles(myList,intv=1) :
"""Retrieve canonical SMILES strings for a list of input INCHIKEYS.
    Only one SMILES string is returned per INCHIKEY. If multiple values are returned, the first is retained and the rest are placed in discard_lst. INCHIKEYS that fail to return a SMILES string are put in fail_lst.
Args:
myList (list): List of INCHIKEYS
intv (1) : number of INCHIKEYS to submit queries for in one request, default is 1
Returns:
list of SMILES strings corresponding to INCHIKEYS
list of INCHIKEYS, which failed to return a SMILES string
list of CIDs and SMILES, which were returned beyond the first CID and SMILE found for input INCHIKEY
"""
ncmpds=len(myList)
smiles_lst,cid_lst,inchikey_lst=[],[],[]
sublst=""
fail_lst=[]
discard_lst=[]
for it in range(0,ncmpds,intv) :
if (it+intv) > ncmpds :
upbnd=ncmpds
else :
upbnd=it+intv
sublst=myList[it:upbnd]
inchikey = ','.join(map(str,sublst))
url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchikey/"+inchikey+"/property/CanonicalSMILES/CSV"
try :
response = urlreq.urlopen(url)
html = response.read()
except :
fail_lst.append(inchikey)
continue
f=io.BytesIO(html)
cnt=0
for l in f :
l=l.decode("utf-8")
l=l.rstrip()
vals=l.split(',')
if vals[0] == '"CID"' :
continue
if cnt > 0:
#print("more than one SMILES returned, discarding. Appear to be multiple CID values",vals)
#print("using",cid_lst[-1],smiles_lst[-1],inchikey_lst[-1])
discard_lst.append(vals)
break
cid_lst.append(vals[0])
            sstr = vals[1].replace('"', '')
            smiles_lst.append(sstr)
inchikey_lst.append(myList[it+cnt])
cnt+=1
if cnt != len(sublst) :
print("warning, multiple SMILES for this inchikey key",cnt,len(sublst),sublst)
save_smiles_df=pd.DataFrame( {'CID' : cid_lst, 'standard_inchi_key' :inchikey_lst, 'smiles' : smiles_lst})
return save_smiles_df,fail_lst,discard_lst
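# Usage sketch for download_smiles; the INCHIKEYS below are illustrative placeholders
# and are not guaranteed to resolve in PubChem.
def _example_download_smiles():
    inchikeys = ["RYYVLZVUVIJVGH-UHFFFAOYSA-N",
                 "BSYNRYMUTXBXSQ-UHFFFAOYSA-N"]
    smiles_df, failed, discarded = download_smiles(inchikeys, intv=1)
    # smiles_df has columns CID, standard_inchi_key and smiles; failed holds keys that
    # returned no record, discarded holds extra CID/SMILES rows beyond the first one.
    return smiles_df, failed, discarded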
#******************************************************************************************************************************************
def download_bioactivity_assay(myList,intv=1) :
"""Retrieve summary info on bioactivity assays.
Args:
myList (list): List of PubChem AIDs (bioactivity assay ids)
    intv (1) : number of AIDs to submit queries for in one request, default is 1
    Returns:
    list of JSON summary strings, one per request
"""
ncmpds=len(myList)
smiles_lst,cid_lst,inchikey_lst=[],[],[]
sublst=""
fail_lst=[]
jsn_lst=[]
for it in range(0,ncmpds,intv) :
if (it+intv) > ncmpds :
upbnd=ncmpds
else :
upbnd=it+intv
sublst=myList[it:upbnd]
inchikey = ','.join(map(str,sublst))
url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+inchikey+"/summary/JSON"
try :
response = urlreq.urlopen(url)
html = response.read()
except :
fail_lst.append(inchikey)
continue
f=io.BytesIO(html)
cnt=0
json_str=""
for l in f :
l=l.decode("utf-8")
l=l.rstrip()
json_str += l
jsn_lst.append(json_str)
return jsn_lst
# save_smiles_df=pd.DataFrame( {'CID' : cid_lst, 'standard_inchi_key' :inchikey_lst, 'smiles' : smiles_lst})
# return save_smiles_df,fail_lst,discard_lst
#******************************************************************************************************************************************
def download_SID_from_bioactivity_assay(bioassayid) :
"""Retrieve summary info on bioactivity assays.
Args:
a single bioactivity id: PubChem AIDs (bioactivity assay ids)
Returns:
Returns the sids tested on this assay
"""
myList=[bioassayid]
ncmpds=len(myList)
smiles_lst,cid_lst,inchikey_lst=[],[],[]
sublst=""
fail_lst=[]
jsn_lst=[]
intv=1
for it in range(0,ncmpds,intv) :
if (it+intv) > ncmpds :
upbnd=ncmpds
else :
upbnd=it+intv
sublst=myList[it:upbnd]
inchikey = ','.join(map(str,sublst))
url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+inchikey+"/sids/JSON"
try :
response = urlreq.urlopen(url)
html = response.read()
except :
fail_lst.append(inchikey)
continue
f=io.BytesIO(html)
cnt=0
json_str=""
for l in f :
l=l.decode("utf-8")
l=l.rstrip()
json_str += l
jsn_lst.append(json_str)
res=json.loads(jsn_lst[0])
res_lst=res["InformationList"]['Information'][0]['SID']
return res_lst
#https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/504526/doseresponse/CSV?sid=104169547,109967232
#******************************************************************************************************************************************
def download_dose_response_from_bioactivity(aid,sidlst) :
"""Retrieve data for assays for a select list of sids.
Args:
    aid (str): a bioactivity assay id (AID), passed as a string
sidlst (list): list of sids specified as integers
Returns:
Nothing returned yet, will return basic stats to help decide whether to use assay or not
"""
sidstr= "," . join(str(val) for val in sidlst)
myList=[sidstr]
ncmpds=len(myList)
smiles_lst,cid_lst,inchikey_lst=[],[],[]
sublst=""
fail_lst=[]
jsn_lst=[]
intv=1
for it in range(0,ncmpds,intv) :
if (it+intv) > ncmpds :
upbnd=ncmpds
else :
upbnd=it+intv
sublst=myList[it:upbnd]
inchikey = ','.join(map(str,sublst))
url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+aid+"/doseresponse/CSV?sid="+inchikey
try :
response = urlreq.urlopen(url)
html = response.read()
except :
fail_lst.append(inchikey)
continue
f=io.BytesIO(html)
cnt=0
json_str=""
        df = pd.read_csv(f)
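# Sketch of chaining the helpers above for one assay. AID 504526 is taken from the
# example URL in the comment above; treat it as a placeholder.
def _example_dose_response_pull():
    sids = download_SID_from_bioactivity_assay(504526)
    # The dose-response helper expects the AID as a string and a list of integer SIDs.
    download_dose_response_from_bioactivity("504526", sids[:5])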
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 13:10:27 2020
@author: Oliver
"""
import os
import numpy as np
import scipy.io
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.signal import savgol_filter, find_peaks
database = pd.DataFrame(columns=['condition', 'name', 'ecg'])
from libraries.io import FileWizard
path1 = 'C:/Users/Oliver/Documents/FYP/code/database/MLII/'
fw = FileWizard()
database = fw.start(path1, database)
from libraries.noise_removal import BaselineNoiseRemover
# DC Notch filter to remove baseline noise from all signals
bnr = BaselineNoiseRemover(c = -0.99)
ecg_waves = database['ecg'].tolist()
ecg_filt = []
for wave in ecg_waves:
filt = bnr.fit(wave)
ecg_filt.append(filt)
database['ecg'] = pd.Series(ecg_filt)
from libraries.feature_extraction import LargeFrequencyExtractor
lfe = LargeFrequencyExtractor()
database = lfe.fit(database)
# Multilevel discrete decomposition to extract large frequencies from time series
from libraries.feature_extraction import PeakExtractor
LISTS2 = ['3 AFL', '4 AFIB', '5 SVTA', '6 WPW',
'7 PVC', '8 Bigeminy', '9 Trigeminy', '10 VT', '11 IVR',
'12 VFL', '13 Fusion', '14 LBBBB', '15 RBBBB', '16 SDHB', '17 PR']
for item in LISTS2:
database = database[database['condition'] != item]
thresh = 20
pe = PeakExtractor(c=thresh)
database = pe.fit(database)
examples = database[database['condition'] == '1 NSR']
example1 = examples.iloc[1]
peaks1 = example1['peaks']
position1 = example1['peak position']
ecg1 = example1['coefficient 4']
from libraries.feature_extraction import MidPointExtractor
mpe = MidPointExtractor()
database = mpe.fit(database)
ecg = database.iloc[0]
print(ecg['midpoints'])
qrs_db = pd.DataFrame(columns=['condition', 'name', 'ecg'])
import sys
import os
import argparse
import numpy as np
import pandas as pd
from pathlib import Path
from multiprocessing.pool import Pool
import h5py
sys.path.append('../data_tools/')
sys.path.append('../region_model/')
sys.path.append('../sequence_model/')
import DataExtractor
import kfold_mutations_main
import SequenceModel
import GenicDriver
def parse_args(text=None):
parser = argparse.ArgumentParser(description="Automation tool for running DIG pipeline")
subparsers = parser.add_subparsers(help='DIG sub-commands')
parser_a = subparsers.add_parser('runDIG', help='Run DIG model')
## required
parser_a.add_argument('--out-dir', type=str, dest='out_dir', required = True, help='Base Directory of DIG run. All intermediate files will be saved relative to this location')
parser_a.add_argument('--map-ref', type=str, dest='map_ref', help='path to mappability file')
parser_a.add_argument('--window-size', type=int, default=10000, dest='window', help='desired window size for DIG model regions')
parser_a.add_argument('--min-map', type=float, default=0.50, dest='min_mapp', help='minimum mappability for windows')
parser_a.add_argument('--ref-file', type=str, dest='ref_file', help='path to reference hg19 genome')
parser_a.add_argument('--mut-file', type=str, dest='mut_file', required = True, help='path to mutations file')
parser_a.add_argument('--N-procs', type = int, dest='n_procs', default = 20, help= 'number of processes to run')
## partial runs
parser_a.add_argument('--map-file', type = str, dest = 'map_file', help = 'map to precomputed mappability file')
parser_a.add_argument('--epi-dir', type=str, dest='epi_dir', help='path to epigenomics files')
parser_a.add_argument('--split_idx', type=str, dest='split_dir', help='path to split index dir')
parser_a.add_argument('--epi-matrix_dir', type=str, dest='epi_matrix_dir', help='path to constructed epigenome matrix h5 file')
parser_a.add_argument('--fmodel-dir', type=str, dest='fmodel_dir', help='path to constructed genome context frequency file')
parser_a.add_argument('--gp-results-base', type=str, dest='gp_res', help='path to generic file name of gp results fold')
##optional arguments
parser_a.add_argument('-c', '--cancer-key', type = str, dest = 'cancer_key', help = 'key name for cancer targets')
parser_a.add_argument('-g', "--gpus", required=False, nargs='?', action='store', type=str, dest='gpus',
                          default='all', help='GPU devices (all or a comma-separated list)')
parser_a.set_defaults(func=run)
if text:
args = parser.parse_args(text.split())
else:
args = parser.parse_args()
return args
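# Example of building the runDIG namespace programmatically through the text hook of
# parse_args defined above; every path and the cancer key below are placeholders.
def _example_rundig_args():
    return parse_args("runDIG --out-dir /tmp/dig_run "
                      "--mut-file /tmp/mutations.txt.gz "
                      "--ref-file /tmp/hg19.fa "
                      "--map-ref /tmp/mappability.bw "
                      "--window-size 10000 --min-map 0.5 --N-procs 4 "
                      "-c PANCAN")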
# inputs are epi-genome tracks and mutation file
def run(args):
if args.gp_res is None:
if args.epi_matrix_dir is None:
if args.epi_dir is None:
print('Error: need to provide either a epi_track dir or a epi_matrix_dir')
return
else:
map_file_name = "high_mapp_{}_{}_{}".format(args.min_mapp, args.window, 0)
mapp_file_path = os.path.join(args.out_dir, map_file_name)
if args.map_file is None:
print('Finding mappable windows...')
mapp_args = DataExtractor.parse_args('mappability {} --out-dir {} --window {} --overlap {} --min-map {}'.format(args.map_ref, args.out_dir, args.window, 0, args.min_mapp))
DataExtractor.mappability(mapp_args)
print('map file saved at: ' + mapp_file_path)
print('creating split index...')
if args.split_dir is None:
split_path = os.path.join(args.out_dir, 'splitIdx_{}'.format(args.window))
if not os.path.exists(split_path):
os.mkdir(split_path)
split_args = DataExtractor.parse_args('splitDataIdx --base-dir {} --out-dir {} --chunk-size {} --window {} --overlap {} --min-map {}'.format(args.out_dir, split_path, 10000, args.window, 0, args.min_mapp))
DataExtractor.split_data_idx(split_args)
print('splitIdx files saved at'+ split_path)
else:
split_path = args.split_dir
print('creating matrix chunks...')
chunks_path = os.path.join(args.out_dir, 'matrix_chunks_{}'.format(args.window))
print(chunks_path)
if not os.path.exists(chunks_path):
os.mkdir(chunks_path)
p = Pool(args.n_procs)
path = Path(split_path).glob('**/*')
files = [str(x) for x in path if x.is_file()]
res = []
for f in files:
res.append(p.apply_async(chunk_runner, (f, chunks_path, args.ref_file, args.epi_dir, args.mut_file, args.window, args.cancer_key)))
p.close()
p.join()
_ = [r.get() for r in res]
print('chunks saved')
print('concatenating chunks...')
concat_args = DataExtractor.parse_args('concatH5 {} --out-dir {}'.format(chunks_path, args.out_dir))
DataExtractor.concatH5(concat_args)
print('adding mappability track')
epi_matrix_fname = os.path.join(args.out_dir, 'data_matrices' + '_{}_0_{}'.format(args.window, args.min_mapp) + '.h5')
addMap_args = DataExtractor.parse_args('addMappability {} {}'.format(epi_matrix_fname, args.map_ref))
DataExtractor.add_mappability(addMap_args)
print('epi track done!')
else:
print('running NN model')
epi_matrix_fname = args.epi_matrix_dir
kfold_args = kfold_mutations_main.get_cmd_arguments('-c {} -d {} -o {} -m {} -g {}'.format(args.cancer_key, epi_matrix_fname, args.out_dir, args.min_mapp, args.gpus))
kfold_mutations_main.main(kfold_args)
print('finished NN model')
directory = os.path.join(args.out_dir, 'kfold/{}'.format(args.cancer_key))
date_dir = max([os.path.join(directory,d) for d in os.listdir(directory)], key=os.path.getmtime)
gp_results_base = os.path.join(date_dir, 'gp_results_fold_{}.h5')
else:
gp_results_base = args.gp_res
mapp_file_path = args.map_file
    # We assume you either have nothing, have the genome counts but not the mutation counts (or annotations), or have everything
if args.fmodel_dir is None:
f_model_path = os.path.join(args.out_dir, 'fmodel_{}_trinuc_192.h5'.format(args.window))
genome_context_args = SequenceModel.parse_args('countGenomeContext {} {} {} {} --up {} --down {} --n-procs {}'.format(mapp_file_path, args.window, args.ref_file, f_model_path, 1, 1, args.n_procs))
SequenceModel.countGenomeContext(genome_context_args)
else:
f_model_path = args.fmodel_dir
fmodel = h5py.File(f_model_path, 'r')
if args.cancer_key + '_mutation_counts' in fmodel.keys():
run_canc = False
else:
run_canc = True
fmodel.close()
if run_canc:
annot_name = os.path.basename(args.mut_file).split('txt.gz')[0] + 'trinuc.txt'
annot_path = os.path.join(args.out_dir, annot_name)
print(annot_path)
annot_args = SequenceModel.parse_args('annotateMutationFile {} {} {} {} --n-procs {}'.format(args.mut_file, f_model_path, args.ref_file, annot_path, args.n_procs))
SequenceModel.annotateMutationFile(annot_args)
annot_path = annot_path + '.gz'
count_contexts_args = SequenceModel.parse_args('countMutationContext {} {} {} {} {} --n-procs {} '.format(mapp_file_path, annot_path, f_model_path, args.window, args.cancer_key, args.n_procs))
SequenceModel.countMutationContext(count_contexts_args)
else:
annot_path = args.mut_file
#run models
print('running models')
submap_path = gp_results_base.split('gp_results')[0] + 'sub_mapp_results_fold_{}.h5'
# for fold in range(5):
# apply_seq_args = SequenceModel.parse_args('applySequenceModel {} {} {} {} {} --cancer {} --key-prefix {} --key {} --n-procs {} --bins {} --run ensemble'.format(gp_results_base.format(fold), f_model_path, annot_path, args.ref_file, args.window, args.cancer_key, args.cancer_key, args.cancer_key, args.n_procs, 50))
# SequenceModel.applySequenceModel(apply_seq_args)
results_path = os.path.join(args.out_dir, 'results')
if not os.path.exists(results_path):
os.mkdir(results_path)
# concat_sequence_results(gp_results_base, args.cancer_key, os.path.join(results_path, 'hotspot_results_{}.h5'.format(args.cancer_key)))
genic_out = os.path.join(results_path, 'genicDetect_{}_{}_{}.h5'.format(args.cancer_key, args.window, args.min_mapp))
genic_args = GenicDriver.parse_args('genicDetectParallel {} {} {} {} -c {} -N {} -m {} -u {}'.format(annot_path, gp_results_base, f_model_path, genic_out, args.cancer_key, args.n_procs, args.min_mapp, submap_path))
GenicDriver.genicDetectParallel(genic_args)
nonc_out = os.path.join(results_path, 'noncDetect_{}_{}_{}.h5'.format(args.cancer_key, args.window, args.min_mapp))
nonc_args = GenicDriver.parse_args('noncDetectParallel {} {} {} {} -c {} -N {} -m {} -u {} -t both'.format(annot_path, gp_results_base, f_model_path, nonc_out, args.cancer_key, args.n_procs, args.min_mapp, submap_path))
GenicDriver.noncodingDetectParallel(nonc_args)
def main():
args = parse_args()
args.func(args)
print('Done!')
def chunk_runner(f, chunks_path, ref_file, epi_dir, mut_file, window, cancer_key):
chunk_args = DataExtractor.parse_args('createChunk {} --out-dir {} --ref-file {} --epi-dir {} --mut-file {} --window {} --bins {} --cancer-key {}'.format(f, chunks_path, ref_file, epi_dir, mut_file, window, 100, cancer_key))
DataExtractor.create_chunk(chunk_args)
def concat_sequence_results(base_results, cancer, out_path):
fout = h5py.File(out_path, 'a')
#keys = [k for k in f[cancer]['test'].keys() if 'nb_model' in k]
keys = ['nb_model_up1_down1_binsize50_run_ensemble']
if len(keys) ==0:
return -1
for k in keys:
print('working on {}'.format(k))
df_lst = []
for run in range(5):
run_res = pd.read_hdf(base_results.format(run), key='{}/test/{}'.format(cancer, k))
run_res = run_res.astype({'CHROM': 'int32', 'POS': 'float64', 'OBS': 'int32', 'EXP': 'float64','PVAL': 'float64','Pi': 'float64','MU': 'float64','SIGMA': 'float64','REGION': 'object'})
df_lst.append(run_res)
        complete = pd.concat(df_lst)
import numpy as np
import pandas as pd
from random import randint
from statistics import mode
from datetime import datetime
import backend.utils.finder as finder
from dateutil.relativedelta import relativedelta
def arrange_df(df, df_type, relevant_col_idx=None, items_to_delete=None, assembly_df=None, bom_trim=False):
"""
:param bom_trim:
:param df:
pandas.DataFrame object that contains the raw format that is read from the file.
:param df_type:
File type of
:param relevant_col_idx:
:param items_to_delete:
:param assembly_df:
:return:
"""
df = df.copy()
if df_type.lower() == "bom":
# Reformatting the columns
df = reformat_columns(df, relevant_col_idx, "bom")
df.part_no = df.part_no.astype(str)
# If specified, bom will be trimmed
if bom_trim:
df = trim_bom(df)
# This part will be discarded for the time being, 15.04.2020
# Deleting the trial products
# df.drop(df[df.product_no.str.split(".", 0).apply(lambda x: int(x[2]) > 900)].index, inplace = True)
# Deleting the entries where two successive entries are level 1s.
df.drop(df[df.level.eq(df.level.shift(-1, fill_value=1)) & df.level.eq(1)].index, inplace=True)
# This to be deleted parts can be redundant, so it will be decided that if these codes are going to stay or not
tbd_list = items_to_delete["Silinecekler"].unique().tolist()
df.drop(df[df["part_no"].str.split(".").apply(lambda x: x[0] in tbd_list)].index, inplace=True)
# Deleting the entries where two successive entries are level 1s.
df.drop(df[df.level.eq(df.level.shift(-1, fill_value=1)) & df.level.eq(1)].index, inplace=True)
# Check if the product structure is okay or not, if not okay, delete the corresponding products from the BOM
df.drop(df[df.groupby("product_no").apply(corrupt_product_bom).values].index, inplace=True)
# Transforming the amounts to a desired format for the simulation model.
df.amount = determine_amounts(df)
# Making sure that the dataframe returns in order
df.reset_index(drop=True, inplace=True)
return df
if df_type.lower() == "times":
# Reformatting the columns
df = reformat_columns(df, relevant_col_idx, "times")
# Transforming the machine names to ASCII characters.
df["station"] = format_machine_names(df, "station")
# Transforming non-numeric values to numeric values
df.cycle_times = pd.to_numeric(df.cycle_times, errors="coerce").fillna(0)
df.setup_times = pd.to_numeric(df.setup_times, errors="coerce").fillna(0)
# Grouping by the times of the parts that has multiple times in the same work station
df = df.groupby(["part_no", "station"], as_index=False).agg({"cycle_times": sum, "setup_times": max})
df.drop(df[df["part_no"].duplicated(keep="last")].index, inplace=True)
# Creating the setup matrix
set_list_df = df[["station", "setup_times"]].copy()
set_list_df.columns = ["stations_list", "setup_time"]
set_list_df = set_list_df.groupby(by="stations_list", as_index=False).agg({"setup_time": mode})
set_list_df["setup_prob"] = 1
set_list_df.loc[(set_list_df.stations_list == "ANKASTRE_BOYAHANE") |
(set_list_df.stations_list == "ENDUSTRI_BOYAHANE"), "setup_prob"] = 3 / 100
set_list_df.loc[set_list_df.stations_list == "ANKASTRE_BOYAHANE", "setup_time"] = 900
set_list_df.loc[set_list_df.stations_list == "ENDUSTRI_BOYAHANE", "setup_time"] = 1200
# Creating a dataframe with the assembly times
montaj_df = df[(df["station"] == "BANT") | (df["station"] == "LOOP")]
# Creating a dataframe with the glass bonding
cmy_df = df[df["station"] == "CAM_YAPISTIRMA"]
# Dropping the assembly times from the original times dataframe and resetting the index
df.drop(df[(df["station"] == "BANT") |
(df["station"] == "LOOP") |
(df["station"] == "CAM_YAPISTIRMA") |
(df["part_no"].apply(lambda x: len(x)) == 13)].index, inplace=True)
# Resetting the index
df.reset_index(drop="index", inplace=True)
# Getting rid of the setup column of time matrix
# df.drop("setup_times", axis = 1, inplace = True)
return df, montaj_df, cmy_df, set_list_df
if df_type.lower() == "merged":
df["station"] = level_lookup(df, "level", "station")
df["cycle_times"] = level_lookup(df, "level", "cycle_times")
df.loc[df.level == 1, ["station", "cycle_times"]] = \
pd.merge(df["product_no"], assembly_df[["part_no", "station", "cycle_times"]], "left",
left_on="product_no",
right_on="part_no")[["station", "cycle_times"]]
missing_dict = missing_values_df(df)
missing_df = pd.DataFrame(missing_dict).transpose().reset_index()
missing_df.columns = ["code", "station", "cycle_times"]
# Ask for what are the values for the NAs in the missing dictionary
"""
THIS WILL CHANGE
"""
missing_df.station.fillna("CAM_YAPISTIRMA", inplace=True)
missing_df.cycle_times.fillna(np.random.randint(25, 60), inplace=True)
"""
END OF THIS WILL CHANGE
"""
# Rounding all the numerical values to integers.
missing_df.loc[~missing_df.station.isna(), "cycle_times"] = \
missing_df.loc[~missing_df.station.isna(), "cycle_times"].apply(np.ceil)
# Creating the missing slice to fill it to the merged bom dataframe later
missing_slice = pd.merge(left=df[df.station.isna()].part_no.str.split(".").apply(lambda x: x[0]),
right=missing_df, left_on="part_no", right_on="code", how="left")
missing_slice.index = df.loc[df.station.isna()].index
# Equating the filled missing data slice into the bom
df.loc[df.station.isna(), ["station", "cycle_times"]] = \
missing_slice[["station", "cycle_times"]]
return df
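# Hypothetical call sketch for arrange_df: the Excel paths, the column indices and the
# items_to_delete sheet depend on the source workbook layout, so the values below are
# placeholders only.
def _example_arrange_bom():
    raw_bom = pd.read_excel("bom.xlsx")
    to_delete = pd.read_excel("items_to_delete.xlsx")  # must contain a "Silinecekler" column
    return arrange_df(raw_bom, "bom", relevant_col_idx=[0, 1, 2, 3, 4],
                      items_to_delete=to_delete)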
def reformat_columns(df, relevant_col_idx, df_type):
if df_type == "bom":
df = df.copy()
# Rearranging the level amount
df["Seviye"] = [int(str(x)[-1]) for x in df[df.columns[relevant_col_idx][2]]]
relevant_col_idx[2] = len(df.columns) - 1
# Determining the columns names to use for reindex later
relevant_col_names = df.columns[relevant_col_idx]
# Columns to be dropped from the dataframe
cols_to_drop = list(set(df.columns) - set(df.columns[relevant_col_idx]))
# Dropping, sorting and indexing the corresponding columns
df.drop(cols_to_drop, axis=1, inplace=True)
df = df.reindex(columns=relevant_col_names)
df.columns = ["product_no", "part_no", "level", "amount", "explanation"]
if df_type == "times":
# Determining the columns names to use for reindex later
relevant_col_names = df.columns[relevant_col_idx]
# Columns to be dropped from the dataframe
cols_to_drop = list(set(df.columns) - set(df.columns[relevant_col_idx]))
# Dropping, sorting and indexing the corresponding columns
df.drop(cols_to_drop, axis=1, inplace=True)
df = df.reindex(columns=relevant_col_names)
df.columns = ["part_no", "station", "cycle_times", "setup_times"]
return df
def trim_bom(df):
# This little piece of code trims the bom so that there is only one instance of product for each product family.
df["product_family"] = [x.split(".")[0] for x in df.product_no]
first_products = df[df.product_family.ne(df.product_family.shift(1, fill_value=0))]["product_no"].to_list()
a = pd.Series([x in first_products for x in df.product_no])
df.drop(df[~a].index, inplace=True)
df.reset_index(drop=True, inplace=True)
# This part is a bit more tricky, this code takes the 90th percentile in the past orders and then takes them into
# consideration, enable if proper use is needed. For a one time usage, just replace the FILE_PATH with pivoted total
# demand info as a csv file. Else, bind the variable to the frontend.
# Order data processing and finding 90 Percentile
# order_data = pd.read_csv(FILE_PATH)
# order_data.drop(order_data.columns[1:4], axis = 1, inplace = True)
# order_data["sum"] = order_data[order_data.columns[1:]].sum(axis = 1)
# order_data.sort_values(by = "sum", inplace = True, ascending = False)
# This part drops the items that are not in the demand page, will change later.
# bom_list = set(df.product_no.to_list())
# order_list = order_data[order_data.columns[0]].to_list()
# order_data.drop(order_data[[x not in bom_list for x in order_list]].index, inplace = True)
# End of that part.
# order_data["perc"] = order_data["sum"].cumsum()/order_data["sum"].sum().cumsum()
# order_data.reset_index(drop = True, inplace = True)
# perc_count = order_data[order_data.perc > 0.9].head(1).index.astype(int)[0]
# prod_list = order_data[order_data.columns[0]].tolist()
# perc_list = [x for x in prod_list if x in frozenset(df.product_no.to_list())][:perc_count]
# a = pd.Series([x in perc_list for x in df.product_no])
# df.drop(df[~a].index, inplace = True)
# df.reset_index(drop = True, inplace = True)
return df
def trim_order(order_df, bom_df):
order_df = order_df.pivot()
return order_df[order_df.index.to_series().str.split(".").apply(lambda x: x[0]).isin(
bom_df.product_no.str.split(".").apply(lambda x: x[0]))]
def trim_df(df, plan_df):
temp_df = df.copy()
products_to_be_taken = pd.DataFrame(plan_df.product_no.unique().tolist(), columns=["product_no"])
products_to_be_taken["is_in_merged"] = products_to_be_taken.product_no.isin(temp_df.product_no.unique().tolist())
missing_dict = find_most_close_products(products_to_be_taken[products_to_be_taken.is_in_merged.eq(0)], temp_df)
products_to_be_taken.product_no.replace(missing_dict, inplace=True)
temp_df = temp_df[temp_df.product_no.isin(products_to_be_taken.product_no.to_list()).eq(1)]
temp_df.reset_index(drop=True, inplace=True)
return temp_df, missing_dict
def schedule_changer_dict(df, days):
    current_month = datetime(df.start_date.dt.year.mode()[0], df.start_date.dt.month.mode()[0], 1).date()
availability = [
1 if (list(days.values)[0][x] == 1) & ((current_month + pd.to_timedelta(x, unit="d")).weekday() < 5) else 0 for
x in range(0, days.columns.max())]
replace_dict = {}
if current_month.weekday() >= 5:
replace_dict[current_month] = pd.to_datetime(
(current_month + pd.to_timedelta(7 - current_month.weekday(), unit="d")))
else:
replace_dict[current_month] = pd.to_datetime(current_month)
for x in range(1, days.columns.max()):
if availability[x] == 1:
replace_dict[(current_month + pd.to_timedelta(x, unit="d"))] = \
pd.to_datetime(current_month + pd.to_timedelta(x, unit="d"))
else:
replace_dict[(current_month + pd.to_timedelta(x, unit="d"))] = \
pd.to_datetime(max(replace_dict.values()))
renewed_dict = {x: replace_dict[x].date() for x in list(replace_dict.keys())}
# days["day"] = days["date"].dt.day
# days.day.replace(renewed_dict, inplace = True)
return renewed_dict
def level_lookup(df, level_col, lookup_col):
dummies = pd.get_dummies(df[level_col])
idx = dummies.index.to_series()
last_index = dummies.apply(lambda col: idx.where(col != 0, np.nan).fillna(method="ffill"))
last_index[0] = 1
idx = last_index.lookup(last_index.index, df[level_col] - 1)
return pd.DataFrame({lookup_col: df.reindex(idx)[lookup_col].values}, index=df.index)
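# Illustrative sketch (a definition only, not executed at import time): for every BOM row,
# level_lookup returns the lookup_col value of its parent row, i.e. the closest previous
# row whose level is one less.
def _example_level_lookup():
    toy = pd.DataFrame({"level": [1, 2, 3, 2],
                        "station": ["BANT", "CNC", "PRES", "KAYNAK"]})
    # Parent station of the level-3 row is "CNC"; both level-2 rows resolve to "BANT".
    return level_lookup(toy, "level", "station")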
def missing_values_df(df):
missing_parts = df[df.station.isna()].part_no.str.split(".").apply(lambda x: x[0]).unique()
missing_dict = {}
for items in missing_parts:
temp_station = df[df.part_no.apply(lambda x: x.split(".")[0]) == items].station.mode()
if temp_station.shape == (0,):
temp_station = np.nan
else:
temp_station = temp_station[0]
temp_cycle = df[df.part_no.apply(lambda x: x.split(".")[0]) == items].cycle_times.mean()
missing_dict[items] = [temp_station, temp_cycle]
return missing_dict
def merge_bom_and_times(df_bom, df_times):
df = pd.merge(left=df_bom, right=df_times, how="left", on="part_no")
df = df[list(df.columns[0:4]) + list(df.columns[5:]) + list(df.columns[4:5])].copy()
return df
def format_word(word, letter_dict):
new_word = ""
for i in range(len(word)):
if word[i] in letter_dict.keys():
new_word += letter_dict[word[i]].upper()
continue
new_word += word[i].upper()
return new_word
def format_machine_names(df, column):
turkish_char_dict = {"ı": "i", "ğ": "g", "Ğ": "G", "ü": "u", "Ü": "U", "Ş": "s", "-": "_",
"ş": "s", "İ": "I", "Ö": "O", "ö": "o", "Ç": "C", "ç": "c", " ": "_", "/": "_"}
machine_dict = {}
for item in list(set(df[column])):
machine_dict[item] = (format_word(item, turkish_char_dict))
return df[column].replace(machine_dict)
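# Quick illustration of the transliteration helper above: every mapped character is
# substituted and the whole result is upper-cased.
def _example_format_word():
    letter_map = {"ü": "u", "ş": "s", " ": "_", "-": "_"}
    return format_word("endüstri boyahane", letter_map)  # -> "ENDUSTRI_BOYAHANE"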
def determine_amounts(df):
copy_series = pd.Series(np.nan, index=df.index)
idx = df.level.lt(df.level.shift(1, fill_value=2)) | df.level.eq(1)
copy_series[idx] = df.loc[idx, "amount"]
copy_series.ffill(inplace=True)
return copy_series
def corrupt_product_bom(group):
if (sum(group.level == 1) == 0) | (
sum((group.level - group.level.shift(1, fill_value=group.at[group.index[0], "level"])) >= 2) != 0):
return pd.DataFrame({"is_valid": [True] * len(group)}, index=group.index)
else:
return pd.DataFrame({"is_valid": [False] * len(group)}, index=group.index)
def find_most_close_products(missing_products, complete_df) -> dict:
product_parameter = [10, 5, 3, 1]
products = pd.DataFrame(complete_df.product_no.unique(), columns=["product_no"])
products[["product_family", "length", "customer", "option"]] = products.product_no.str.split(".", expand=True)
missing_prods = pd.DataFrame(missing_products.product_no.unique(), columns=["product_no"])
missing_prods[["product_family", "length", "customer", "option"]] = missing_prods.product_no.str.split(".",
expand=True)
missing_dict = {missing_prods.product_no[x]:
products[::-1].product_no[
((products[products.columns[-4:]] == missing_prods.iloc[x, -4:]) * product_parameter).sum(
axis=1).idxmax()] for x in
missing_prods.index}
return missing_dict
# noinspection PyTypeChecker
def create_operational_table(df, table_type, aux=None, *args):
if table_type == "legend":
df = df.iloc[finder.input_indices(df)][["product_no", "part_no"]]
df.index = list(range(1, len(df) + 1))
return df
elif table_type == "input":
df = df.iloc[finder.input_indices(df)][["product_no", "amount"]]
df["product_no"] = finder.product_numerator(df)
df.index = list(range(1, len(df) + 1))
df.columns = ["product", "amount"]
return df
elif table_type == "dup":
# This function gets the input table as a base dataframe to work on and make calculations with.
df = create_operational_table(df=df, table_type="input")
# The following three lines creates the products' index in the process input list, i.e. from the input table
s = df["product"].ne(df["product"].shift(fill_value=df.iloc[0]["product"]))
product_idx = pd.Series([1] + list(np.where(s)[0] + 1))
product_idx.index += 1
# Following line calculates the entity amounts to be duplicated in the simulation software
dup_count = product_idx.shift(-1, fill_value=len(df) + 1) - product_idx
        # The next two lines concatenate (zip) the created product index and the duplication amounts and
        # convert them to a pandas dataframe together with the product number.
duplication_table = pd.concat(
[pd.Series(list(range(1, len(product_idx) + 1)), index=list(range(1, len(product_idx) + 1))), product_idx,
dup_count], axis=1)
duplication_table.columns = ["product", "start", "number to duplicate"]
return duplication_table
elif table_type == "sequence":
df_copy = df.copy().reset_index()
dummies = pd.get_dummies(df_copy["level"])
lookup_series = df_copy["station"]
gross_matrix = dummies.apply(lambda col: lookup_series.where(col != 0, np.nan).fillna(method="ffill"))
gross_matrix.index = df.index
gross_matrix = gross_seq_matrix_trimmer(gsm=gross_matrix, df=df, matrix_type="station")
gross_matrix.index = list(range(1, gross_matrix.shape[0] + 1))
return gross_matrix
elif table_type == "time":
df_copy = df.copy().reset_index()
dummies = pd.get_dummies(df_copy["level"])
lookup_series = df_copy["cycle_times"].copy()
gross_matrix = dummies.apply(lambda col: lookup_series.where(col != 0, np.nan).fillna(method="ffill"))
gross_matrix.index = df.index
gross_matrix = gross_seq_matrix_trimmer(gsm=gross_matrix, df=df, matrix_type="time")
gross_matrix.index = list(range(1, gross_matrix.shape[0] + 1))
return gross_matrix
elif table_type == "joins":
# Tutorial df for joining matrix
df = df[["product_no", "level"]].copy()
df["product_no"] = finder.product_numerator(df)
# df = df[df["product_no"].le(100)].copy()
input_idx = finder.input_indices(df)
join_df = df.loc[finder.joining_indices(df)].copy()
join_matrix = pd.DataFrame(index=input_idx, columns=list(range(1, df.level.max() + 1)))
join_idx = 2
product_assembly_amount = df.loc[finder.input_indices(df)].copy().reset_index().groupby(by="product_no").agg(
{"index": list, "level": list})
product_assembly_amount["count"] = df.copy().reset_index().groupby(by="product_no").apply(num_of_input)
join_amount_count = [1]
# start loop here
while len(join_df) > 0:
curr_row = int(join_df.tail(1).index[0])
curr_level = df.loc[curr_row, "level"]
start_row = curr_row
end_row = int(df[df["level"].eq(df.loc[curr_row, "level"] - 1) & (df.index < curr_row)].tail(1).index[0])
middle_parts = df[
df["level"].eq(df.loc[curr_row, "level"]) & (df.index <= start_row) & (df.index >= end_row)]
inputs_n_levels = [[input_idx[input_idx >= x][0], df.loc[input_idx[input_idx >= x][0], "level"]] for x in
middle_parts.index]
if pd.isna(join_matrix.loc[inputs_n_levels[0][0], inputs_n_levels[0][1] - curr_level + 1]):
product_assembly_amount.loc[df.loc[inputs_n_levels[0][0], "product_no"], "count"] -= (
len(inputs_n_levels) - 1)
for inputs in inputs_n_levels:
join_matrix.loc[inputs[0], inputs[1] - curr_level + 1] = join_idx
join_df.drop(join_df.tail(1).index[0], inplace=True)
join_amount_count.append(len(inputs_n_levels))
join_idx += 1
else:
join_df.drop(join_df.tail(1).index[0], inplace=True)
for product_idx in product_assembly_amount.index:
temp_idx = product_assembly_amount.loc[product_idx, "index"]
for idx in temp_idx:
join_matrix.loc[idx, df.loc[idx, "level"]] = join_idx
join_matrix.loc[idx, list(range(1, df.loc[idx, "level"]))] = \
join_matrix.loc[idx, list(range(1, df.loc[idx, "level"]))].fillna(1)
join_amount_count.append(product_assembly_amount.loc[product_idx, "count"])
join_idx += 1
join_amount_df = pd.DataFrame(
{"join_code": list(range(1, len(join_amount_count) + 1)), "amount": join_amount_count},
index=list(range(1, len(join_amount_count) + 1)))
join_matrix.reset_index(drop=True, inplace=True)
join_matrix.index = list(range(1, join_matrix.shape[0] + 1))
join_amount_df.amount[join_amount_df.amount < 1] = 1
return join_matrix, join_amount_df
elif table_type == "set_list":
x_y_coord = pd.merge(left=df, right=aux, left_on="stations_list", right_on="machine", how="left").loc[:,
["x_coordinate", "y_coordinate"]]
df["queues_list"] = [str(x) + "_Q" for x in df.stations_list]
df["resources_list"] = [str(x) + "_RES" for x in df.stations_list]
df[["x_coordinates", "y_coordinates"]] = x_y_coord
df = df[[df.columns[0]] + list(df.columns[3:]) + list(df.columns[1:3])]
df.index = list(range(1, df.shape[0] + 1))
return df
elif table_type == "order":
products = aux.copy()
prod_idx = products["product_no"].ne(
products["product_no"].shift(1, fill_value=products.iloc[0]["product_no"])).cumsum() + 1
products["prod_code"] = prod_idx
idx_dict = \
products.drop_duplicates("product_no", keep="first").drop("part_no", axis=1).set_index("product_no",
drop=True).to_dict()[
"prod_code"]
whole_dataframe = df.copy()
whole_dataframe.product_no.replace(idx_dict, inplace=True)
whole_dataframe["day_of_month"] = whole_dataframe.start_date.dt.day
if args[0] > 1:
if args[1]:
whole_dataframe.drop(whole_dataframe[~whole_dataframe["start_date"].dt.month ==
whole_dataframe["start_date"].dt.month.mode().values[0]].index,
inplace=True)
if whole_dataframe.start_date.dt.month.mode().values[0] != 12:
curr_month_day = (datetime(year=whole_dataframe.start_date.dt.year.mode().values[0],
month=whole_dataframe.start_date.dt.month.mode().values[0] + 1,
day=1) - pd.to_timedelta(1, unit="d")).day
else:
curr_month_day = (datetime(year=whole_dataframe.start_date.dt.year.mode().values[0] + 1, month=1,
day=1) - pd.to_timedelta(1, unit="d")).day
whole_dataframe.loc[int(whole_dataframe.shape[0] / 2):, "day_of_month"] = [x + curr_month_day for x in
whole_dataframe.iloc[int(
whole_dataframe.shape[
0] / 2):,
:].day_of_month]
# This is VERY, VERY dumb
whole_dataframe["due_date"] = pd.concat(
[whole_dataframe.loc[:int(whole_dataframe.shape[0] / 2) - 1, "due_date"],
whole_dataframe.loc[int(whole_dataframe.shape[0] / 2):, "due_date"] + pd.to_timedelta(
curr_month_day, unit="d")])
whole_dataframe["start_date"] = pd.concat(
[whole_dataframe.loc[:int(whole_dataframe.shape[0] / 2) - 1, "start_date"],
whole_dataframe.loc[int(whole_dataframe.shape[0] / 2):, "start_date"] + pd.to_timedelta(
curr_month_day, unit="d")])
# THIS LINE CAUSED REDUNDANCY AFTER MULTIPLE PLAN SPLIT, IS COMMENTED OUT FOR NOW
# whole_dataframe.drop(whole_dataframe[whole_dataframe["day_of_month"] > curr_month_day*args[0]].index, inplace = True)
else:
months_count = whole_dataframe["start_date"].dt.month.value_counts().to_dict()
plan_months = []
for _ in range(len(months_count.keys())):
plan_months.append(max(months_count, key=lambda x: months_count[x]))
months_count.pop(max(months_count, key=lambda x: months_count[x]))
whole_dataframe.drop(whole_dataframe[~whole_dataframe["start_date"].dt.month.isin(plan_months)].index,
inplace=True)
first_month_day = (datetime(whole_dataframe["start_date"].dt.year.max(),
whole_dataframe["start_date"].dt.month.max(), 1) - pd.to_timedelta(1,
unit="d")).day
whole_dataframe.loc[whole_dataframe["start_date"].dt.month == max(plan_months), "day_of_month"] = \
whole_dataframe.loc[
whole_dataframe["start_date"].dt.month == max(plan_months), "day_of_month"] + first_month_day
else:
whole_dataframe.drop(whole_dataframe[~whole_dataframe["start_date"].dt.month ==
whole_dataframe["start_date"].dt.month.mode().values[0]].index,
inplace=True)
whole_dataframe.reset_index(drop=True, inplace=True)
whole_dataframe["sim_release_time"] = whole_dataframe.groupby("day_of_month").apply(
create_sim_time_release_month)
whole_dataframe["due_date_attribute"] = [x.strftime("%y%m%d") for x in whole_dataframe.due_date]
whole_dataframe["days_until_due"] = [x.days for x in whole_dataframe.due_date - whole_dataframe.start_date]
whole_dataframe = whole_dataframe[
["product_no", "sim_release_time", "amount", "due_date_attribute", "days_until_due"]].copy()
whole_dataframe.sort_values("sim_release_time", inplace=True)
whole_dataframe.index = list(range(1, whole_dataframe.shape[0] + 1))
return whole_dataframe
else:
raise KeyError
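# Hypothetical driver sketch: the merged BOM frame produced by arrange_df(..., "merged")
# feeds each operational table in turn (the "joins" branch returns two frames).
def _example_operational_tables(merged_bom):
    legend = create_operational_table(merged_bom, "legend")
    inputs = create_operational_table(merged_bom, "input")
    dup = create_operational_table(merged_bom, "dup")
    joins, join_amounts = create_operational_table(merged_bom, "joins")
    return legend, inputs, dup, joins, join_amounts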
def create_tactical_table(df, table_type):
if table_type == "mult":
df = df.copy()
df.amount = df.amount.astype(int)
df.cycle_times = df.cycle_times.astype(float)
df.cycle_times = df.amount * df.cycle_times.astype(float)
df.drop(finder.joining_indices(df), inplace=True)
df.drop(df[df.product_no.eq(df.product_no.shift(1, fill_value=0)) & df.level.eq(1)].index, inplace=True)
production_path = df.groupby("station", as_index=False).agg({"level": np.mean})
production_path.sort_values("level", ascending=False, inplace=True)
machine_legend_table = production_path["station"].copy()
machine_legend_table.index = list(range(1, machine_legend_table.shape[0] + 1))
machine_legend_table = machine_legend_table.to_frame()
production_path.drop("level", axis=1, inplace=True)
production_path.columns = [1]
production_path = production_path.transpose()
production_path.columns = list(range(1, production_path.shape[1] + 1))
df = df.groupby(["product_no", "station"], as_index=False).agg({"cycle_times": sum})
# mean_table = df.groupby(["product_no", "station"], as_index = False).agg({"cycle_times": sum})
df["product_family"] = df.product_no.str.split(".").apply(lambda x: x[0])
product_family_legend_table = pd.DataFrame(df.product_family.unique().tolist(), columns=["product_family"],
index=list(range(1, len(df.product_family.unique().tolist()) + 1)))
count_dict = {x: df[df.product_family == x].product_no.nunique() for x in
df.product_family.unique()}
prob_table = df.groupby(["product_family", "station"], as_index=False).agg({"product_no": "count"})
prob_table["total_products"] = prob_table.product_family
prob_table.total_products.replace(count_dict, inplace=True)
prob_table["probabilities"] = prob_table.product_no / prob_table.total_products
prob_table.drop(["product_no", "total_products"], axis=1, inplace=True)
prob_table = prob_table.pivot("product_family", "station", "probabilities").fillna(0)
prob_table = prob_table[list(production_path.to_numpy()[0])].copy()
df = df.groupby(["product_family", "station"], as_index=False).agg({"cycle_times": [min, np.mean, max]})
df.columns = ["product_family", "station", "min", "mean", "max"]
min_table = df.pivot("product_family", "station", "min").fillna(0)
min_table = min_table[list(production_path.to_numpy()[0])].copy()
mean_table = df.pivot("product_family", "station", "mean").fillna(0)
mean_table = mean_table[list(production_path.to_numpy()[0])].copy()
max_table = df.pivot("product_family", "station", "max").fillna(0)
max_table = max_table[list(production_path.to_numpy()[0])].copy()
for table in [prob_table, min_table, mean_table, max_table]:
table.index = list(range(1, table.shape[0] + 1))
table.columns = list(range(1, table.shape[1] + 1))
return product_family_legend_table, machine_legend_table, production_path, min_table, mean_table, max_table, prob_table
elif table_type == "set_list":
df["queues_list"] = [str(x) + "_Q" for x in df.stations_list]
df["resources_list"] = [str(x) + "_RES" for x in df.stations_list]
df["x_coordinates"] = [randint(-50, 150) for _ in df.stations_list]
df["y_coordinates"] = [randint(-50, 150) for _ in df.stations_list]
df = df[[df.columns[0]] + list(df.columns[3:]) + list(df.columns[1:3])]
df.index = list(range(1, df.shape[0] + 1))
return df
elif table_type == "order":
temp_df = df[df.columns[-12:]].copy()
temp_df.reset_index(inplace=True)
temp_df = temp_df.melt(id_vars="product_no")
temp_df["date"] = pd.to_datetime(temp_df.date)
temp_df["product_family"] = temp_df.product_no.str.split(".").apply(lambda x: x[0])
temp_df.groupby(["product_family", "date"], as_index=False).agg({"value": sum})
temp_df = temp_df.groupby(["product_family", "date"], as_index=False).agg({"value": sum})
temp_df = temp_df.groupby(by=["product_family", "date"], as_index=False).apply(create_sim_timestamps)
temp_df.drop(temp_df[temp_df.amount == 0].index, inplace=True)
temp_df.reset_index(drop=True, inplace=True)
products = temp_df.copy()
prod_idx = products["product_family"].ne(
products["product_family"].shift(1, fill_value=products.iloc[0]["product_family"])).cumsum() + 1
products["prod_code"] = prod_idx
idx_dict = \
products.drop_duplicates("product_family", keep="first").set_index("product_family", drop=True).to_dict()[
"prod_code"]
temp_df.product_family.replace(idx_dict, inplace=True)
temp_df["day_of_year"] = temp_df.start_date.dt.dayofyear
temp_df["sim_release_time"] = temp_df.groupby("day_of_year").apply(create_sim_time_release_year)
temp_df["due_date_attribute"] = [x.strftime("%y%m%d") for x in temp_df.due_date]
temp_df["days_until_due"] = (temp_df.due_date - temp_df.start_date).dt.days
temp_df = temp_df[
["product_family", "sim_release_time", "amount", "due_date_attribute", "days_until_due"]].copy()
temp_df.sort_values("sim_release_time", inplace=True)
temp_df.reset_index(drop=True, inplace=True)
return temp_df
elif table_type == "set_list":
df = df.copy()
df["queues_list"] = [str(x) + "_Q" for x in df.stations_list]
df["resources_list"] = [str(x) + "_RES" for x in df.stations_list]
df["x_coordinates"] = [randint(-50, 150) for _ in df.stations_list]
df["y_coordinates"] = [randint(-50, 150) for _ in df.stations_list]
df = df[[df.columns[0]] + list(df.columns[3:]) + list(df.columns[1:3])]
df.index = list(range(1, df.shape[0] + 1))
return df
else:
raise KeyError
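# Sketch of the tactical-side call (assumes the same merged BOM frame as above): the
# "mult" branch returns the seven tables consumed by the tactical simulation model.
def _example_tactical_tables(merged_bom):
    (families, machines, path,
     min_t, mean_t, max_t, probs) = create_tactical_table(merged_bom, "mult")
    return families, machines, path, min_t, mean_t, max_t, probs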
def gross_seq_matrix_trimmer(gsm, df, matrix_type):
max_level = df.level.max()
input_idx = finder.input_indices(df)
temp_df = df.loc[input_idx]
trimmed_df = None
if matrix_type == "station":
trimmed_df = pd.DataFrame(index=input_idx, columns=list(range(1, gsm.columns.max() + 2)))
for curr_level in range(max_level, 1, -1):
temp_idx = temp_df.loc[temp_df.level == curr_level].index
temp_seq = gsm.loc[temp_idx][list(range(curr_level, 0, -1))]
temp_seq[curr_level + 1] = "ENDING_STATION"
temp_seq.columns = list(range(1, curr_level + 2))
trimmed_df.loc[temp_idx, list(range(1, curr_level + 2))] = temp_seq
elif matrix_type == "time":
trimmed_df = pd.DataFrame(index=input_idx, columns=list(range(1, gsm.columns.max() + 1)))
for curr_level in range(max_level, 1, -1):
temp_idx = temp_df.loc[temp_df.level == curr_level].index
temp_seq = gsm.loc[temp_idx][list(range(curr_level, 0, -1))]
temp_seq.columns = list(range(1, curr_level + 1))
trimmed_df.loc[temp_idx, list(range(1, curr_level + 1))] = temp_seq
trimmed_df.reset_index(drop=True, inplace=True)
return trimmed_df
def num_of_input(group):
return len(finder.input_indices(group))
def create_sim_time_release_month(group):
temp = group["day_of_month"] * 60 * 60 * 24
# noinspection PyTypeChecker
temp = temp + list(range(len(temp)))
return pd.DataFrame(temp, index=group.index)
def create_sim_time_release_year(group):
temp = group["day_of_year"] * 60 * 60 * 24
# noinspection PyTypeChecker
temp = temp + list(range(len(temp)))
return pd.DataFrame(temp, index=group.index)
def check_input_validity(group):
if len(group.level.unique()) == group.level.max():
return pd.DataFrame([True] * len(group), index=group.index)
else:
return pd.DataFrame([False] * len(group), index=group.index)
def create_input_branches(group):
new_series = group.groupby(by=group.level.eq(1).cumsum()).apply(check_input_validity)
return pd.DataFrame(new_series, index=group.index)
def create_sim_timestamps(group):
if group.value == 0:
return pd.DataFrame(columns=["product_family", "amount", "start_date", "due_date"])
else:
length_multiplier = int(np.select(
[group.value <= 10, group.value <= 50, group.value <= 250, group.value <= 500, group.value > 500],
[1, 2, 4, 6, 8]))
rel = relativedelta(days=int(30 / length_multiplier))
start_dates = group.date.apply(lambda x: [x + y * rel for y in range(length_multiplier)])[0]
end_dates = \
group.date.apply(lambda x: [x + y * rel - relativedelta(days=1) for y in range(1, length_multiplier + 1)])[0]
return pd.DataFrame({"product_family": [group.product_family.values[0]] * length_multiplier,
"amount": [int(group.value / length_multiplier)] * length_multiplier,
"start_date": start_dates, "due_date": end_dates})
def ctesi_creator(hour):
if hour < 24:
out = pd.DataFrame(data=[[1, hour], [0, 24 - hour]])
else:
out = pd.DataFrame(data=[[1, 24], [0, 0]])
return out
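# Illustrative note (added; not part of the original source): ctesi_creator appears to
# encode a daily on/off schedule as a two-row frame of [state, hours], e.g.
#   ctesi_creator(10)  -> rows [1, 10] (active 10 h) and [0, 14] (idle 14 h)
#   ctesi_creator(30)  -> rows [1, 24] and [0, 0] (capped at a full 24-hour day)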
from typing import Union, cast
import warnings
import numpy as np
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.formats.printing import pprint_thing
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
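# Minimal usage sketch (illustrative, not part of the pandas source): the pairs below
# fall within the default rtol/atol, so neither call raises an AssertionError.
#   assert_almost_equal(1.000001, 1.0)
#   assert_almost_equal(pd.Series([0.300000001, 1.0]), pd.Series([0.3, 1.0]))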
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_index_equal
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index"):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
# allow string-like to have different inferred_types
if left.inferred_type in ("string"):
assert right.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_values()
right = right.sort_values()
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.sum((left._values != right._values).astype(int)) * 100.0 / len(left)
)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for left_arr, right_arr in zip(left, right):
# count up differences
if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
Examples
--------
>>> from pandas.testing import assert_extension_array_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
*,
check_index=True,
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
check_index : bool, default True
Whether to check index equivalence. If False, then compare only values.
.. versionadded:: 1.3.0
Examples
--------
>>> from pandas.testing import assert_series_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> assert_series_equal(a, b)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
if check_index:
# GH #38183
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
    msg = (
        f"[datetimelike_compat=True] {left._values} "
        f"is not equal to {right._values}."
    )
    raise AssertionError(msg)
import numpy as np
import os, sys, argparse
from Bio.PDB import PDBParser
import pandas as pd
import itertools
from itertools import permutations
from math import sqrt
from time import sleep
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
sys.path.append(f'{__location__}/..')
from helpers import get_neighbors, inNd, parse_input_dir, parse_output_dir, aa_dict_31, generate_pdb, get_cm, rotmats, get_pairs_mat
from LatticeModelComparison import LatticeModelComparison
# --- Relevant lattice distances ---
ca_dist = 3.8 / sqrt(3)
cacb_dist = 1.53 / sqrt(3)
n1_dist = 1.48 / sqrt(3)
cacb_dist_unit = cacb_dist / ca_dist
n1_dist_unit = n1_dist / ca_dist
# --- Helix structure modifiers ---
bcc_neighbor_mods = m1 = np.array(list(set(permutations((1, 1, 1, -1, -1, -1), 3))))
m2_mods = np.array([[1,1,-1], [-1, 1, 1], [1,-1,1]])
m3p_mods = m2_mods * np.array([[-1,1,1], [1, -1, 1], [1,1,-1]])
m3n_mods = m2_mods * np.array([[1,-1,1], [1, 1, -1], [-1, 1, 1]])
m4p_mods = m3p_mods * m2_mods
m4n_mods = m3n_mods * m2_mods
helix_mods = list(itertools.chain.from_iterable([[
np.vstack([mm1, mm1 * mm2, mm1 * mm3p, mm1 * mm4p]) if np.product(mm1) > 0
else np.vstack([mm1, mm1 * mm2, mm1 * mm3n, mm1 * mm4n])
for mm2, mm3p, mm3n, mm4p, mm4n in zip(m2_mods, m3p_mods, m3n_mods, m4p_mods, m4n_mods)] for mm1 in m1]))
helix_type_mod_dict = {
1: helix_mods,
5: helix_mods
}
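# Descriptive note (added; an interpretation of the construction above): bcc_neighbor_mods
# lists the 8 body-centred-cubic step vectors (all sign combinations of (+/-1, +/-1, +/-1)),
# and helix_mods builds, for each possible starting step, a repeating 4-step move pattern
# used to trace helices of PDB HELIX classes 1 and 5 on the lattice.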
# --- backbone and non-backbone atom names ---
atm_names_bb = ['N', 'H', 'CA', 'HA', 'C', 'O']
atm_names_res = np.array(['B', 'G', 'D', 'E', 'F'])
# --- functions ---
def parse_sheet_info(sheet_txt_list):
"""
Parse list of strings of PDB SHEET records to pandas data frame
"""
sheet_df = pd.DataFrame(
columns=['resi_start', 'resi_end', 'orientation', 'resi_hb_cur', 'resi_hb_prev'],
index=pd.MultiIndex(levels=[[], []], codes=[[], []], names=['strand_id', 'sheet_id']))
for line in sheet_txt_list:
ll = line.split()
if ll[1] == '1':
sheet_df.loc[(int(ll[1]),ll[2]), :] = [int(line[22:26]), int(line[33:37]), int(line[38:40]),
pd.NA, pd.NA]
else:
sheet_df.loc[(int(ll[1]),ll[2]), :] = [int(line[22:26]), int(line[33:37]), int(line[38:40]),
int(line[50:54]), int(line[65:69])]
return sheet_df
def parse_helix_info(helix_txt_list):
"""
Parse list of strings of PDB HELIX records to a pandas data frame
"""
helix_df = pd.DataFrame(
columns=['resi_start', 'resi_end', 'type', 'length']
)
for line in helix_txt_list:
resi_start, resi_end = int(line[21:25]), int(line[33:37])
if resi_end - resi_start < 3: continue
helix_df.loc[int(line[7:10]), :] = [int(line[21:25]), int(line[33:37]), int(line[39:40]), int(line[72:76])]
return helix_df
def parse_ssbond_info(ssbond_txt_list, resi2idx_dict):
ssbond_df = pd.DataFrame({'resi1': [resi2idx_dict[int(line[17:21])] for line in ssbond_txt_list],
'resi2': [resi2idx_dict[int(line[31:35])] for line in ssbond_txt_list]})
ssbond_df.loc[:, 'max_resi'] = ssbond_df.max(axis=1)
ssbond_df.loc[:, 'min_resi'] = ssbond_df.min(axis=1)
ssbond_df.set_index('max_resi', inplace=True)
return ssbond_df
def put_sheet_on_lattice(sheet_df):
# todo output hbond indicator
# strand_dict = {}
idx_list = []
coord_list = []
for idx, tup in sheet_df.iterrows():
len_strand = tup.resi_end - tup.resi_start + 1
strand_array = np.zeros((len_strand, 3), dtype=int)
strand_array[:, 1] = np.arange(len_strand)
if idx[0] != 1:
# Translate/rotate to correct position
strand_array[:, 1] = strand_array[:, 1] * tup.orientation
strand_array[:, 0] = idx[0]
hb_idx = np.argwhere(tup.resi_hb_cur == np.arange(tup.resi_start, tup.resi_end + 1))[0, 0]
hb_idx_prev = np.argwhere(tup.resi_hb_prev == prev_resi_range)[0, 0]
strand_array[:, 1] = strand_array[:, 1] + (hb_idx_prev - hb_idx)
prev_resi_range = np.arange(tup.resi_start, tup.resi_end + 1)
idx_list.extend(list(range(tup.resi_start, tup.resi_end + 1)))
coord_list.append(strand_array)
coord_array = np.vstack(coord_list)
return idx_list, coord_array
def put_helix_on_lattice(tup):
tup.resi_start = tup.resi_start + 1
tup.resi_end = tup.resi_end - 1
tup.length = tup.length - 2
# generate all possible conformations for type of helix
mods = helix_type_mod_dict[tup.type]
nb_steps = mods[0].shape[0]
coord_list = []
for mod in mods:
coords = np.zeros((tup.length, 3))
for i in range(tup.length - 1):
coords[i + 1, :3] = coords[i, :3] + mod[i % nb_steps, :]
coord_list.append(coords)
return np.arange(tup.resi_start, tup.resi_end + 1), coord_list
def pick_best_pose(poses, first_lat_coord, ss_real_coords):
best_pose = (None, np.inf)
for pose in poses:
pose = pose.copy()
pose[:, :3] = pose[:, :3] + first_lat_coord[:3]
sum_norm_diff = np.sum(np.linalg.norm(pose[:, :3] - ss_real_coords[:, :3], axis=1))
if sum_norm_diff < best_pose[1]:
best_pose = (pose, sum_norm_diff)
return best_pose[0]
def get_all_neighbors(c):
neighbors_out = bcc_neighbor_mods.copy()
neighbors_out[:, :3] += c[:3]
return neighbors_out
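# Illustrative example (added): get_all_neighbors returns the 8 BCC lattice neighbours of
# a coordinate, e.g. get_all_neighbors(np.array([0, 0, 0])) yields the 8 points (+/-1, +/-1, +/-1).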
def parse_ss3_file(ss_fn):
ss_df = pd.read_csv(ss_fn, skiprows=2, names=['idx', 'resn', 'ss', 'H', 'S', 'L'], sep='\s+')
import numpy as np
np.random.seed(875431)
import pandas as pd
import os
import astron_common_functions as astronfuns
from matplotlib import pyplot as plt
from matplotlib import transforms
from mpl_toolkits.mplot3d import axes3d
import matplotlib.font_manager as font_manager
# plt.ion()
font_path = '/home/anup/.matplotlib/fonts/arial.ttf'
fontprop = font_manager.FontProperties(fname=font_path)
import h5py
# --------------------------------
def stackplot_with_features(ah,t,y,peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus,hgap,tshift):
ah.plot(t+tshift,y+hgap,color="black",linewidth=0.5,marker="",alpha=1)
# plot tau region
if (len(ipeaks) > 0):
i20l = irts[0]
i80l = irts[1]
i50l = ifwhms[0]
i50r = ifwhms[1]
# }
for i in range(0,len(ipeaks)):
# plot peaks
ah.plot(t[ipeaks[i]]+tshift,y[ipeaks[i]]+hgap,marker="o",markersize=0.3,linestyle="",color="red")
# plot 20-80 risetime
# ah.plot([t[i20l[i]]+tshift,t[i80l[i]]+tshift],[y[i20l[i]]+hgap,y[i80l[i]]+hgap],color="green",linewidth=0.8)
# plot fwhms
# ah.plot([t[i50l[i]]+tshift,t[i50r[i]]+tshift],[y[i50l[i]]+hgap,y[i50r[i]]+hgap],color="orange",linewidth=0.8)
# plot taus
# ah.plot(t[itaus[i]]+tshift,y[itaus[i]]+hgap,marker="o",markersize=0.1,linestyle="",color="blue")
# ah.plot(t[ipeaks[i]:itaus[i]]+tshift,y[ipeaks[i]:itaus[i]]+hgap,color="blue",linewidth=0.8,marker="")
# }
# ah.spines["right"].set_visible(False)
# ah.spines["top"].set_visible(False)
# ah.spines["left"].set_visible(False)
# ah.spines["bottom"].set_visible(False)
# ah.set_xticks([])
# ah.set_xticklabels([],fontsize=8,font=fontprop)
# ah.set_yticks([])
# ah.set_yticklabels([],fontsize=8,font=fontprop)
return(ah)
# ------------------------------
def plot3d_with_features(ah,t,y,itrial,peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus):
z = np.zeros(t.shape)+itrial
ah.plot3D(t,y,itrial,zdir='x',color="grey",linewidth=0.5,marker="")
if (len(ipeaks) > 0):
i20l = irts[0]
i80l = irts[1]
i50l = ifwhms[0]
i50r = ifwhms[1]
for i in range(0,len(ipeaks)):
# plot peaks
ah.plot3D(t[ipeaks[i]],y[ipeaks[i]],itrial,zdir='x',marker="o",markersize=0.1,linestyle="",color="red")
# plot 20-80 risetime
ah.plot3D([t[i20l[i]],t[i80l[i]]],[y[i20l[i]],y[i80l[i]]],[itrial,itrial],zdir='x',color="black",linewidth=0.2)
# plot fwhms
ah.plot3D([t[i50l[i]],t[i50r[i]]],[y[i50l[i]],y[i50r[i]]],[itrial,itrial],zdir='x',color="black",linewidth=0.2)
# plot taus
ah.plot3D(t[itaus[i]],y[itaus[i]],itrial,zdir='x',marker="o",markersize=0.1,linestyle="",color="blue")
# }
# -------------------------------------
# diskname = "/home/anup/data/"
# dir1 = "ap1to1000dhz30scarel/run/"
disk = "/run/media/anup/3becd611-cb79-4b80-b941-2edcc0d64cb4/"
folder1 = "data/"
# folder2 = "dhpg100000nM2s/run/"
folder2 = "badhpgcarel/run/"
# groups = ["ctrl","admglur","adpmca","admglurpmca"]
groups = ["ctrl"]
ngroups = len(groups)
# allfreqs = [1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100,200,300,400,500,600,700,800,900,1000]
# freqs = [4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100,200,300,400,500,600,700,800,900,1000]
# freqs = [100]
# _,ifreqs,_ = np.intersect1d(allfreqs,freqs,return_indices=True)
# nfreqs = len(freqs)
# ------------------
dhpgs = [0,10,20,30,40,50,60,70,80,90,100,200,300,400,500,600,700,800,900,1000,2000,3000,4000,5000,6000,7000,8000,9000,10000,20000,30000,40000,50000,60000,70000,80000,90000,100000] # dhpg
ndhpgs = len(dhpgs)
dhpg = 6000
# ----------------
stimdurs = [2,10,30,60,90,120]
nstimdurs = len(stimdurs)
stimdur = 120
# ---------------
# print(nfreqs,freqs,ifreqs)
# input()
trial0 = 1
ntrials = 30
trials = np.sort(np.random.choice(range(trial0,ntrials+trial0),ntrials,replace=False))
# varnames=['time','n0ca_cyt','n0syt45_reltime_glu','n0syt45_vkdoc_glu','n0syt45_vfmob_glu']
timecol = "time"
cacol="n0ca_cyt"
varnames = [timecol,cacol]
thres = 300e-3 # 300 nM
delta = 1*thres # 300 nM
# hgap = -0.07
# hgaptrial = 5
# tshift = -0.17
# tshifttrial = 10
# tstimstart = 200
# tstimstop = 230
# hgap = 0.65
# hgaptrial0 = -0.3
# tshift = -0.4
# tshifttrial0 = 3
# tstimstart = 200
# tstimstop = 202
# fh1,(ah1) = plt.subplots(figsize=(4,4),dpi=600,frameon=False,ncols=1,nrows=1,gridspec_kw={"width_ratios":[1],"height_ratios":[1]},subplot_kw=dict(projection='3d'))
fh1,(ah1) = plt.subplots(figsize=(4,3),dpi=600,frameon=False,ncols=1,nrows=1)
# ahs_raster = [ah1[0],ah1[1],ah1[2],ah1[3]]
ahs_raster = [ah1]
# fh1.subplots_adjust(hspace=0,wspace=0)
# -------------------------
# for igroup in range(0,len(groups)):
# for ifreq in range(0,len(freqs)):
# fprefix = "".join(("astrocyte_ap30s",str(freqs[ifreq]),"dHz"))
# # fprefix = "".join(("astrocyte_ap",str(freqs[ifreq]),"dHz30s"))
# for itrial,ifile in zip(range(0,ntrials),trials):
# findex = ifile
# fname = "".join((fprefix,"_",groups[igroup],str(findex),".csv"))
# fullname = os.path.join(diskname,dir1,groups[igroup],fname)
# print(findex," ",fullname)
# df = pd.read_csv(fullname,header=0,usecols=varnames)
# t = df[timecol] - tstimstart
# y = df[cacol] * 1e6
# timesca = astronfuns.detect_peaks_above_threshold(t,y,thres,delta,eventval=0)
# # fh = plt.figure(figsize=(3,3),frameon=False)
# # ah = fh.add_subplot(111)
# # ah.plot(df[timecol],df[cacol])
# # plt.show()
# peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus = astronfuns.compute_event_features(df,thres,delta)
# hgaptrial = hgaptrial + hgap
# tshifttrial = tshifttrial + tshift
# ahs_raster[igroup] = stackplot_with_features(ahs_raster[igroup],t,y,peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus,hgaptrial,tshifttrial)
# # _ = plot3d_with_features(ahs_raster[igroup],t,y,itrial,peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus)
# # (ah,t,y,hgaptrial,tshifttrial)
# # ah.plot(t,y+itrial)
# # }
# # }
# # }
# ------------------------
# for igroup in range(0,len(groups)):
# fprefix = "astrocyte_dhpg100000nM2s"
# hgaptrial = hgaptrial0
# tshifttrial = tshifttrial0
# for itrial,ifile in zip(range(0,ntrials),trials):
# findex = ifile
# fname = "".join((fprefix,"_",groups[igroup],str(findex),".csv"))
# fullname = os.path.join(disk,folder1,folder2,groups[igroup],fname)
# print(findex," ",fullname)
# df = pd.read_csv(fullname,header=0,usecols=varnames)
# t = df[timecol][(df[timecol] > (tstimstart-1)) & (df[timecol] < (tstimstop + 1))].to_numpy() -tstimstart
# y = df[cacol][(df[timecol] > (tstimstart-1)) & (df[timecol] < (tstimstop + 1))].to_numpy() * 1e6
# # t = df[timecol] - tstimstart
# # y = df[cacol] * 1e6
# dftemp = pd.DataFrame({"time":t,cacol:y})
# timesca = astronfuns.detect_peaks_above_threshold(t,y,thres,delta,eventval=0)
# # fh = plt.figure(figsize=(3,3),frameon=False)
# # ah = fh.add_subplot(111)
# # ah.plot(df[timecol],df[cacol])
# # plt.show()
# peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus = astronfuns.compute_event_features(dftemp,thres,delta)
# hgaptrial = hgaptrial + hgap
# tshifttrial = tshifttrial + tshift
# ahs_raster[igroup] = stackplot_with_features(ahs_raster[igroup],t,y,peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus,hgaptrial,tshifttrial)
# # _ = plot3d_with_features(ahs_raster[igroup],t,y,itrial,peaks,rts,fwhms,taus,ipeaks,irts,ifwhms,itaus)
# # (ah,t,y,hgaptrial,tshifttrial)
# # ah.plot(t,y+itrial)
# # }
# # }
# ---------------------------
hgap = 0.65
hgaptrial0 = -0.7
tshift = -0.55
tshifttrial0 = 7
tstimstart = 200
tstimstop = 320
# ------------------
for igroup in range(0,len(groups)):
fprefix = "".join(("astrocyte_",str(stimdur),"s",str(dhpg),'nM'))
hgaptrial = hgaptrial0
tshifttrial = tshifttrial0
for itrial,ifile in zip(range(0,ntrials),trials):
findex = ifile
fname = "".join((fprefix,"_",groups[igroup],str(findex),".csv"))
fullname = os.path.join(disk,folder1,folder2,groups[igroup],fname)
print(findex," ",fullname)
df = pd.read_csv(fullname,header=0,usecols=varnames)
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ..Api import AccountEndpoint, RequestType
from .utils import _dot_flatten
class Balances(AccountEndpoint):
_type = RequestType.Info
_resource = "accounts/{0}/balances.json"
def extract(self, response):
"""Extract certain fields from response"""
response = response.json()["response"]
balances = response["accountbalance"]
d = {k: v for k, v in _dot_flatten(balances).items()}
return d
@staticmethod
def DataFrame(raw):
import pandas as pd
# Wrap these in lists so that they can be read by pandas
raw = {k: [v] for k, v in raw.items()}
return pd.DataFrame.from_dict(raw)
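# Usage sketch (illustrative; the keys below are hypothetical, not taken from the API):
#   raw = {"accountvalue": "12345.67", "money.cash": "1000.00"}
#   Balances.DataFrame(raw)   # -> one-row DataFrame with one column per flattened key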
'''
1) Total sales data graph
2) Time interval for delivery
3) Individual users data analysis
4) Category wise sells data graph
'''
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import numpy as np
import io
from os.path import dirname, join
def sells_graph(dataFrame):
counter = {}
dataList = list(dataFrame.iloc[1:, 6])
for i in dataList:
if i in counter.keys():
counter[i] += 1
else:
counter[i] = 1
list2 =[]
for i in counter.keys():
if counter[i] < 30:
list2.append(i)
for i in list2:
del counter[i]
categoryFrequency = list(counter.values())
categoryName = list(counter.keys())
plt.pie(categoryFrequency,labels=categoryName, autopct='%2.1f%%')
plt.show()
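# Design note (added): the frequency loop above could equivalently use collections.Counter,
# e.g.  from collections import Counter; counter = dict(Counter(dataFrame.iloc[1:, 6])),
# after which categories seen fewer than 30 times are dropped before plotting the pie chart.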
def deliveryTime(dataFrame):
differenceInDay = []
orderDay = []
dispatchedDay = []
orderDate = list(dataFrame.iloc[1:, 0])
orderDate = pd.to_datetime(orderDate)
import requests
import pandas as pd
import json
import tabulate
# https request to the opensea api
url = "https://api.opensea.io/api/v1/collections"
# the query of information, owner is their ETH address
querystring = {"owner":"0x48114b9211acdd83a4a5f9dced3b48c5032bfdd5","offset":"0","limit":"300"}
#response and the text data along with it
response = requests.request("GET", url, params=querystring)
# the text data
data = response.text
#data to json
json = json.loads(data)
#data to df
dataframe = pd.DataFrame.from_dict(json['collections'])
traits = dataframe['traits'].to_dict()
# extracting the individual features
Handshoes = pd.DataFrame(traits[0]['Handshoes'],index=[0])
Pants = pd.DataFrame(traits[0]['Pants'],index=[0])
Species = pd.DataFrame(traits[0]['Species'],index=[0])
Shoes = pd.DataFrame(traits[0]['Shoes'],index=[0])
import pandas as pd
url = "https://opendata.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0.csv"
df = pd.read_csv(url)
df['Meldedatum'] = pd.to_datetime(df["Meldedatum"])
df.sort_values(by='Meldedatum')
tables = pd.read_html("https://de.wikipedia.org/wiki/Liste_der_deutschen_Bundesl%C3%A4nder_nach_Bev%C3%B6lkerung", thousands='.')
population_states = tables[0][['Bundesland', '2018']]
population_states.loc[2, 'Bundesland'] = 'Berlin'
population_states.loc[16, 'Bundesland'] = 'Germany'
narrow_df = df[['Bundesland', 'AnzahlFall', 'AnzahlTodesfall', 'Meldedatum']]
narrow_df['Meldedatum'] = narrow_df['Meldedatum'].apply(lambda x:x.date())
narrow_df.sort_values(by='Meldedatum', inplace=True)
cumsum_dfs = []
for bundesland in list(narrow_df['Bundesland'].unique())+['Germany']:
if bundesland == 'Germany':
sel = narrow_df
else:
sel = narrow_df[narrow_df['Bundesland']==bundesland]
sel = sel[['Meldedatum','AnzahlFall', 'AnzahlTodesfall']].groupby('Meldedatum').agg('sum')
sel = sel.sort_values(by='Meldedatum')
sel['cumulative_cases'] = sel['AnzahlFall'].cumsum()
sel['cumulative_deaths'] = sel['AnzahlTodesfall'].cumsum()
if len(sel[sel['cumulative_cases'] > 50]) > 0:
first_case_date = sel[sel['cumulative_cases'] > 50].iloc[0].name
sel['days_since_50_cases'] = (sel.index - first_case_date) / pd.Timedelta(days=1)
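# Likely continuation (an assumption - the snippet is truncated here): each per-state
# frame would presumably be collected and combined for plotting, e.g.
#   sel['Bundesland'] = bundesland
#   cumsum_dfs.append(sel)
# and, after the loop, pd.concat(cumsum_dfs) would give one frame covering all states.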
# -*- coding: utf-8 -*-
""" dati_selezione.ipynb
Extraction of data from ISS weekly covid-19 reports
https://www.epicentro.iss.it/coronavirus/aggiornamenti
See example pdf:
https://www.epicentro.iss.it/coronavirus/bollettino/Bollettino-sorveglianza-integrata-COVID-19_19-gennaio-2022.pdf
Requirements:
Python 3.6+, Ghostscript (ghostscript), Tkinter (python3-tk)
numpy, pandas, camelot, PyMuPDF, Beautiful Soup 4 """
import locale
import re
from datetime import datetime
from os import chdir, path
from urllib import request
from urllib.parse import urljoin
import camelot
import fitz
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
def get_surveillance_reports():
"""get_surveillance_reports() -> list
return: list of "integrated surveillance of Covid-19 in Italy" reports"""
# Source of the ISS reports
epicentro_url = "https://www.epicentro.iss.it/coronavirus/aggiornamenti"
# Requests URL and get http.client.HTTPResponse object
with request.urlopen(epicentro_url) as response:
# Parse text obtained
soup = BeautifulSoup(response, "html.parser")
# Find all hyperlinks present on webpage
links = soup.find_all("a")
# The table is available since 14/07/2021
# The script has been updated to 2022-01-19 report
cut_date = pd.to_datetime("2022-01-19")
cut_date_end = pd.to_datetime("2022-03-16")
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 28 14:29:40 2020
@author: Joyqiao
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 15:58:06 2020
@author: Joyqiao
Assumption: TMA is for a unique patient; Primeid is for a unique account that may involve multiple patients.
This time we don't delete the TMA accounts that link to multiple TMAs,
since I want to confirm whether one claim number links to only one TMA or one prime id.
Even after using the mapping file we still have around 100k duplicate records in the 1/16 file.
We also do not involve the transplant or the claim-extraction stage.
"""
import pandas as pd
import numpy as np
import sqlite3
import os
import sys
sys.path.append(r'C:\Users\Joyqiao\Documents\CMU\RA\HIGHMARK Trajectory claim\HIghmark Project\code')
from transplant_dialysis import clean_transplant_dialysis
basepath = r'C:\Users\Joyqiao\Documents\CMU\RA\HIGHMARK Trajectory claim\HIghmark Project'
'''
#2. if still one claim number matches to two TMA in the new mapping, keep the mapping with smallest patient TMA acct value and delete the rest, leave the valid mapping relation to correct the previous one
#save patients with multiple TMA acct as a mapping
duplicate_mapping = mapping[mapping.duplicated(['claim_number_deidentified'],keep = False)]
len(duplicate_mapping['TMA_Acct'].unique())#597 unique tma acct
#since one claim number can only be associated with one patient, if we identify any claim that matches multiple TMA Acct, we treat these Acct as one patient.
duplicate_map = duplicate_mapping.sort_values(['TMA_Acct']).groupby(['claim_number_deidentified'])['TMA_Acct'].unique()
duplicate_map = pd.DataFrame([i.tolist() for i in duplicate_map])
duplicate_map.rename(columns = {0:'keep_acct'},inplace = True)
duplicate_map.drop_duplicates(keep = 'first',inplace = True)
duplicate_map.to_csv('patient_with_multiple_acct.csv')
#for i in duplicate_map['keep_acct']:
# print(duplicate_map[duplicate_map.eq(i).any(1)])
#new mapping(DELETE ALL ERROR LINKS)
mapping = mapping.sort_values(['TMA_Acct']).drop_duplicates(subset = 'claim_number_deidentified',keep = 'first')
len(mapping['claim_number_deidentified'].unique())
mapping['TMA_Acct'].nunique() #9509
'''
def map_process(map_file):
'''
find all the claims that link to different TMAs (which should not occur, as this is a mapping file; each
claim should only be matched with A UNIQUE PATIENT)
Parameters
----------
map_file : TYPE str
DESCRIPTION.
Returns mapping
Writes out a csv file of the problematic TMA accounts
------
'''
os.chdir(basepath+'\mapping')
map1 = pd.read_csv('df_claim_to_member_mapping_part1.csv')
map2 = pd.read_csv('df_claim_to_member_mapping_part2.csv')
map3 = pd.read_csv('df_claim_to_member_mapping_part3.csv')
map4 = pd.read_csv('df_claim_to_member_mapping_part4.csv')
map5 = pd.read_csv('df_claim_to_member_mapping_part5.csv')
map6 = pd.read_csv('df_claim_to_member_mapping_part6.csv')
map7 = pd.read_csv('df_claim_to_member_mapping_part7.csv')
mapping = pd.concat([map1,map2,map3,map4,map5,map6,map7],ignore_index = True)
mapping['TMA_Acct'].nunique() #There is total 9668 patients in mapping
new = pd.read_csv(map_file)
mapping = mapping.merge(new, how = 'outer',on = 'claim_number_deidentified',indicator = True)
mapping.loc[mapping['_merge'] == 'both','TMA_Acct'] = mapping['TMA Acct#']
mapping = mapping.drop(columns = ['TMA Acct#','_merge'])
mapping.drop_duplicates(keep = False,inplace = True)
#find the lines that claim match with 2 or more TMA matched
duplicate_mapping = mapping[mapping.duplicated(['claim_number_deidentified'],keep = False)]
#extract the involved tma account list
drop_member_list = duplicate_mapping['TMA_Acct'].drop_duplicates(keep = 'first')
#clean up the mapping
mapping = mapping.drop_duplicates(subset = 'claim_number_deidentified',keep = False)
#how many claims left in the mapping
len(mapping['claim_number_deidentified'].unique())
#how many unique patient left in the mapping
mapping['TMA_Acct'].nunique()
drop_member_list.to_csv(basepath+'\data\sorted_data\drop_member_list.csv',index = False)
return mapping,drop_member_list
# =============================================================================
# rearrange the data to cut the begin and end part into a separate file
# =============================================================================
def cut_startend_patient_todata(read_in,output_filename):
'''
since the beginning and ending parts of each file may contain patients that also appear in other files, and to make the drop_duplicates method work right,
we extract the first and last (already sorted) patients of each file into the next (+1) file
Parameters
----------
read_in : TYPE str
DESCRIPTION. filepath
output_filename : TYPE str
DESCRIPTION.
Returns
-------
data : TYPE dataframe
DESCRIPTION. After process df
'''
data = pd.read_csv(read_in)
start_cut = data['TMA Acct#'] == int(data[:1]['TMA Acct#'])
data[start_cut].to_csv(output_filename,mode = 'a')
data = data[~start_cut]
end_cut = data['TMA Acct#'] == int(data[-1:]['TMA Acct#'])
data[end_cut].to_csv(output_filename,mode = 'a')
data = data[~end_cut]
return data
# =============================================================================
#consider manually matching the patient TMA Account for those patients with multiple TMA numbers according to the mapping dataset
#sum(mapping.groupby(['claim_number_deidentified']).count()['TMA_Acct'] > 2)+sum(mapping.groupby(['claim_number_deidentified']).count()['TMA_Acct'] == 1)
# =============================================================================
def drop_problematic_members(data):
'''
remove patients that fall into the patients-with-multiple-TMA list (patient_withTMA)
Parameters
----------
data : TYPE dataframe
DESCRIPTION.
Returns
-------
data : TYPE dataframe
DESCRIPTION.
'''
data = data[~data['TMA Acct#'].isin(drop_member_list)]
return data
# =============================================================================
# clean data based on scenarios
# =============================================================================
def clean(df):
df = df.drop_duplicates(keep = 'first')
#select the useful columns for later analysis
#df = data[['claim_type', 'line_number','eacl_prv_alcrg_at','proc_code','TMA Acct#', 'claim_number_deidentified','icrd_dt_deidentified']]
# =============================================================================
#1 Scenario 1: join with the updated claim_TMA mapping (to resolve the error that some claim numbers match two TMAs)
df = df.merge(mapping, how = 'left',on = 'claim_number_deidentified')
df.loc[df.claim_number_deidentified.isin(mapping.claim_number_deidentified),'TMA Acct#'] = df['TMA_Acct'] #TMA Acct if from the df, TMA_Acct is from mapping
df = df.drop(columns = ['TMA_Acct'])
#verify if all df claim# are covered within mapping data
len(df) - sum(df.claim_number_deidentified.isin(mapping.claim_number_deidentified))
#corrected patient number
df['TMA Acct#'].nunique()
df.rename(columns = {'TMA Acct#':'TMA_Acct'},inplace = True)
#verify there's no claims match to multiple TMA
'''b=df.groupby(['claim_number_deidentified']).agg({'TMA_Acct':'count'})
print('claims matches to multiple tma {}'.format(sum(b.iloc[:,0]>1)))
'''
#save the Mapping of all the claim with multiple TMA links
#df.loc[~df.claim_number_deidentified.isin(mapping.claim_number_deidentified),['TMA Acct#','claim_number_deidentified']].drop_duplicates(keep = 'first').to_csv('One_claim_matches_multiple_TMA.csv')
#delete all the patients that are in patient_withTMA list
#df = df[~df['TMA Acct#'].isin(patient_withTMA )] #8
#len(df['TMA Acct#'].unique()) #8160
# =============================================================================
# rejection rule: as long as claim number, TMA, proc code, incurred date, cost and claim type are the same, keep only one of the duplicates
# =============================================================================
df_filter = df.drop_duplicates(subset = ['TMA_Acct','claim_number_deidentified','proc_code','eacl_prv_alcrg_at','icrd_dt_deidentified','claim_type'],keep = 'first')
# =============================================================================
#2 Scenario 2: if there is a claim where all of its lines have values <= 0, then the whole claim should be ignored.
'''
grouped=df.groupby(['claim_number_deidentified','proc_code'])
df_filter = grouped.filter(lambda x: (x['eacl_prv_alcrg_at']>0).any())
#save claims that only have zero or negative costs
df.merge(df_filter,how = 'outer',indicator = True).loc[lambda x: x['_merge']=='left_only'].to_csv('cost_Negative_only_claims.csv')
len(df_filter['TMA_Acct'].unique()) '''
#scenario 3 remove rejected line
# =============================================================================
# INSPECT: for the same patient, claim number and proc_code, how many lines have the same absolute cost value
#df_filter =df_filter[df_filter['eacl_prv_alcrg_at'] != 0]
#gb_filter = df_filter.groupby(['TMA_Acct','claim_number_deidentified','proc_code','eacl_prv_alcrg_at'],as_index = False).size().reset_index().rename(columns = {0:'count'})
#gb_filter['count'] = np.where(gb_filter['eacl_prv_alcrg_at']<0,gb_filter['count']*(-1),gb_filter['count'])
#gb_filter_count = gb_filter.groupby(['TMA_Acct','claim_number_deidentified','proc_code'])['eacl_prv_alcrg_at','count'].sum()
#gb_filter_other = gb_filter_count[~(gb_filter_count['eacl_prv_alcrg_at'] > -0.001) & (gb_filter_count['eacl_prv_alcrg_at']< 0.001)].reset_index()
#len(gb_filter_other['TMA_Acct'].unique())
# =============================================================================
# filter df keep
# =============================================================================
connection = sqlite3.connect(':memory:')
df_filter.to_sql('alldata',connection)
query1 = '''
select tab1.*
from alldata tab1 left join alldata tab2
on tab1.eacl_prv_alcrg_at = -tab2.eacl_prv_alcrg_at and
tab1.proc_code = tab2.proc_code and
tab1.icrd_dt_deidentified = tab2.icrd_dt_deidentified and
tab1.TMA_Acct = tab2.TMA_Acct and
tab1.claim_type = tab2.claim_type and
tab1.claim_number_deidentified = tab2.claim_number_deidentified
where tab2.claim_number_deidentified is null
'''
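# Added note: the self-join above is an anti-join - a line is kept only when no other
# line with the same patient, claim, procedure, incurred date and claim type carries the
# exactly offsetting (negated) charge, so reversal pairs drop out and are handled below.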
df_keep = pd.read_sql_query(query1,connection)
df_keep.set_index(['index'],inplace = True)
df_process = df_filter[~df_filter.index.isin(df_keep.index)]
#df_keep.to_sql('keepdata',connection,index = False)
# =============================================================================
# records that still have a negative cost and no matching positive line, but whose claims are not made up only of negative lines
# =============================================================================
c=df_keep[df_keep['eacl_prv_alcrg_at'] <= 0]
c.to_csv('negative_record_with_no_matching_one_with_positive_in_claims.csv',mode = 'a')
df_keep = df_keep[df_keep['eacl_prv_alcrg_at']>0]
# =============================================================================
# filter df_process
# total = cost per record * number of records
# =============================================================================
# gb = df_process.groupby(['TMA_Acct','claim_number_deidentified','proc_code','eacl_prv_alcrg_at','icrd_dt_deidentified','claim_type'],as_index = False).size().reset_index().rename(columns = {0:'count'})
# gb['total'] = gb['count']*gb['eacl_prv_alcrg_at']
# gb['cost_abs'] = gb['eacl_prv_alcrg_at'].abs()
# #gb['count'] = np.where(gb['eacl_prv_alcrg_at']<0,gb['count']*(-1),gb['count'])
# gb_count = gb.groupby(['TMA_Acct','claim_number_deidentified','proc_code','cost_abs','icrd_dt_deidentified','claim_type'])['total','count'].sum()
# if sum(gb_count['total']==0) == len(gb_count):
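# For the offsetting lines, keep only the positive-cost copies; their negative
# counterparts are dropped here.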
df_process = df_process[df_process['eacl_prv_alcrg_at']>0]
'''
# =============================================================================
# even scenario
# =============================================================================
even_keep = gb_count.loc[(gb_count['count']%2 ==0) & (gb_count['total']== 0)].reset_index()
# for i in range(1, max(gb_count['count'])//2):
# double_temp = gb_count.loc[(gb_count['count']== 2*i) & (gb_count['total']== 0)]
# double_keep = double_keep.append([double_temp],ignore_index=True)
gb_odd = gb_count[(gb_count['total'] !=0)].reset_index()
# =============================================================================
# triple lines scenario
# =============================================================================
# gb_triple = gb_process[gb_process['total']/gb_process['cost_abs'] == gb_process['count']/3]
# gb_other = gb_process[~gb_process.index.isin(gb_triple.index)]
# gb_triple['count'] /=3
# gb_triple.drop(columns = ['total'],inplace = True)
#
# for i in range(1, int(max(gb_triple['count']))):
# gb_temp = gb_triple[gb_triple['count']== (i+1)]
# gb_triple = gb_triple.append([gb_temp]*i,ignore_index=True)
#
# gb_triple.drop(columns = ['count'],inplace = True)
# triple_keep=gb_triple.rename(columns = {'cost_abs':'eacl_prv_alcrg_at'})
# =============================================================================
# odd scenario
# =============================================================================
gb_odd['triple_count'] = round(gb_odd['total']/gb_odd['cost_abs'])
gb_odd['double_count'] = (gb_odd['count'] - gb_odd['triple_count'])/2
for i in range(1, int(max(gb_odd['triple_count']))+1):
if i ==1:
continue
else:
gb_temp = gb_odd[gb_odd['triple_count']== i]
gb_odd= gb_odd.append([gb_temp]*(i-1),ignore_index=True)
for i in range(1, int(max(gb_odd['double_count']))+1):
gb_temp = gb_odd[gb_odd['double_count']== i]
gb_odd= gb_odd.append([gb_temp]*i,ignore_index=True)
gb_odd.drop(columns = ['total','count','triple_count','double_count'],inplace = True)
gb_odd=gb_odd.rename(columns = {'cost_abs':'eacl_prv_alcrg_at'})
df_keep.drop(columns = ['line_number'],inplace = True)
cleaned=pd.concat([df_keep,even_keep,gb_odd])
'''
cleaned = pd.concat([df_keep, df_process])
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
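# Fixtures: a 5-bar close price series, a 3-column wide variant, and larger
# random frames (presumably for heavier tests elsewhere in the suite).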
# ############# Global ############# #
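# setup_module enables strict Numba suffix checks, attaches call_seq and
# disables caching so runs are deterministic; teardown_module resets settings.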
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
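    # Element-wise comparison that treats NaN == NaN as a match, which plain
    # tuple equality would not.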
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
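    # Each call below either raises on an invalid state/parameter or returns an
    # ignored (status=1) or rejected (status=2) order result.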
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
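    # From here the inputs are valid; assertions check the resulting
    # ExecuteOrderState (cash, position, debt, free_cash) and the filled OrderResult.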
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
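# order_size alternates all-in entries (np.inf) and exits (-np.inf) with a NaN
# gap; order_size_one places the same pattern one unit at a time.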
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
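# Thin wrappers around vbt.Portfolio.from_orders that differ only in the
# `direction` argument; e.g. from_orders_both().order_records yields the
# structured order_dt array that the tests below compare against.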
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
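        # 'auto' reorders calls each bar so that cash-freeing (sell) orders are executed before buys.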
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
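    # size_type='value': the size is given in cash terms and converted into shares at the valuation price.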
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
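    # size_type='targetvalue': order whatever is needed to bring the position to the target cash value.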
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
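    # size_type='targetpercent': rebalance the position to a target fraction of the current portfolio value.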
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
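    # update_value=True re-values the group after each filled order; with shared cash this changes the sizes of later orders in the same bar.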
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
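    # size_type='percent': use a fraction of the resources available at order time rather than of total value.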
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
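    # With call_seq='auto', the targets should be reached exactly: per-column asset values must equal target_hold_value.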
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
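    # max_orders and max_logs cap the pre-allocated record arrays; exceeding either capacity must raise.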
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
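# The from_ls_* helpers run the same scenarios through the separate long/short entry and exit arguments of from_signals.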
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
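    # A custom signal_func_nb can derive the four signal flags at runtime; here the sign of the broadcast arrays selects entries (> 0) and exits (< 0).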
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
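    # val_price=np.inf should behave like valuing at the current close, and -np.inf like the previous (forward-filled unless disabled) close.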
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
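    # sl_stop is a fractional stop-loss measured from the entry price; NaN disables it and np.inf never triggers.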
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
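    # sl_trail=True turns the stop-loss into a trailing stop measured from the most favorable price since entry.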
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(ts_stop=-0.1)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
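    # tp_stop is a fractional take-profit measured from the entry price (mirrored for short positions).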
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
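    # stop_entry_price: selects the price the stop distance is measured from when
    # the position is opened ('val_price', 'price', 'fillprice' or 'close').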
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
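    # stop_exit_price: selects the price the stop exit order is filled at
    # ('stoplimit', 'stopmarket', 'close' or 'price'); slippage applies to all
    # but the stop-limit fill.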
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
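    # upon_stop_exit: what to do with the position when a stop fires
    # ('close', 'closereduce', 'reverse', 'reversereduce'), with and without accumulation.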
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
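    # upon_stop_update: whether an accumulated entry keeps or overrides the active
    # stop value ('keep', 'override', 'overridenan').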
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
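    # adjust_sl_func_nb: per-bar callback that can rewrite the stop-loss; here it
    # sets the stop to 0 after a fixed number of bars, forcing an exit.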
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
            return (0. if c.i - c.init_i >= dur else c.curr_stop), c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
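    # Same callback on a trailing stop; the duration is measured from the bar of
    # the last trailing update (c.curr_i) rather than from the entry bar.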
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
            return (0. if c.i - c.curr_i >= dur else c.curr_stop), c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
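    # adjust_tp_func_nb: same idea applied to the take-profit stop.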
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
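    # max_orders / max_logs: record arrays are preallocated; exceeding the cap raises.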
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
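    # from_holding is shorthand for a single entry at the first bar and no exits.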
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandomSignals:
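    # from_random_signals: entries/exits generated either by count (n) or by
    # probability (prob), reproducible under a fixed seed.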
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
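# Order functions shared by the tests below: buy the given size on even bars and
# sell it on odd bars; the flexible variants pick the target column themselves.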
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
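    # A single column passed as a list vs. a Series must produce identical records
    # and wrapper metadata.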
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
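    # Broadcasting of named args ('size') across columns, with and without Numba.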
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
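    # Grouping without cash sharing: every column keeps its own initial cash;
    # pf.init_cash reports the per-group totals.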
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
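    # Cash sharing: columns of a group draw from one balance, so the first column
    # in the call sequence consumes the group's cash.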
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
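    # call_seq: per-bar processing order of columns within a group
    # ('default', 'reversed', 'random'); 'auto' requires sorting via
    # sort_call_seq_nb inside a pre-segment callback.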
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
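    # TargetValue sizing: without an explicit valuation price the previous close
    # is used, so no order can be placed on the very first bar.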
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
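    # TargetPercent sizing: same setup with the target expressed as a fraction of
    # current group value.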
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
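    # update_value: when True the running value is refreshed right after each fill
    # (fees and slippage included), so value_before and value_now diverge.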
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
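    # State bookkeeping: compare value, return and position records as seen from
    # the pre-segment, order and post-order callbacks against the final portfolio.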
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
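    # post_sim_func_nb receives the final simulation context; verify its fields
    # (order/log records, last_* state arrays, call_seq, segment mask, etc.).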
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
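    # free_cash: cash available for new positions after subtracting debt kept for
    # open shorts; compared against pf.cash(free=True).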
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
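    # init_cash: per-column amounts, unlimited cash (np.inf), and the Auto /
    # AutoAlign modes that infer the required capital.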
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
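# The call-tracking tests below share a single counter (call_i) that every
# hook increments before appending its call number to a typed list; the
# asserted sequences therefore pin down the exact order in which simulation,
# group/row, segment, order and post-order callbacks fire.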
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
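# price_wide is 5 rows x 3 columns here, so the template below evaluates to
# np.prod([5, 3]) == 15 -- the value every hook above guards against.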
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
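# 1 pre_sim + 2 pre_group + 10 pre_segment + 15 order + 15 post_order
# + 10 post_segment + 2 post_group + 1 post_sim = 56 calls in total.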
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
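# Flexible mode keeps calling the order function within a segment until it
# returns column -1, so each segment adds group_len + 1 order calls but only
# group_len post-order calls.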
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
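# 1 pre_sim + 2 pre_group + 10 pre_segment + 25 order + 15 post_order
# + 10 post_segment + 2 post_group + 1 post_sim = 66 calls in total.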
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
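# Row-wise traversal replaces the group hooks with row hooks: pre/post_row
# fire once per row (5 times each) instead of once per group.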
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
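# 1 pre_sim + 5 pre_row + 10 pre_segment + 15 order + 15 post_order
# + 10 post_segment + 5 post_row + 1 post_sim = 62 calls in total.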
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
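# Row-wise traversal combined with flexible order calls (see the two
# variants above).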
def test_func_calls_row_wise_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
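# 1 pre_sim + 5 pre_row + 10 pre_segment + 25 order + 15 post_order
# + 10 post_segment + 5 post_row + 1 post_sim = 72 calls in total.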
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
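# With an infinite order size every element of the 5x3 input produces an
# order record, so a capacity of 15 is exactly enough and 14 must overflow.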
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
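# The same 15-record bound applies to log records when the logging order
# functions are used.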
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
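# Fixtures for the attribute tests below: price_na injects a NaN into each
# column to exercise missing-price handling, and the same orders are run as
# three portfolios -- ungrouped (pf), grouped without cash sharing
# (pf_grouped) and grouped with cash sharing (pf_shared, 200 initial cash
# for the shared 'first' group).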
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # independent
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # shared
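# The tests below compare every derived Portfolio attribute (orders, logs,
# trades, cash, exposure, ...) against precomputed reference values for the
# three fixtures above.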
class TestPortfolio:
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
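# Column and group selection must propagate consistently to the wrapper,
# orders, logs, init_cash and call_seq.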
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
                    pd.Timestamp('2020-01-01 00:00:00'),
import requests
import pandas
from io import StringIO  # StringIO moved to the io module in Python 3
import pickle
import astropy.units as u
import astropy.constants as const
#import EXOSIMS.PlanetPhysicalModel.Forecaster
from sqlalchemy import create_engine
import getpass,keyring
import numpy as np
import re
def substitute_data(original_data):
query_ext = """https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exomultpars&select=*&format=csv"""
r_ext = requests.get(query_ext)
    data_ext = pandas.read_csv(StringIO(r_ext.text))  # use .text (str); .content is bytes and would break io.StringIO
# planets_of_int = pandas.read_csv("C:\\Users\\NathanelKinzly\\github\\orbits\\plandb.sioslab.com\\output.csv")
# names = planets_of_int[["pl_name"]]
regex = re.compile(r"(<a.*f>)|(</a>)")
regex_year = re.compile(r"[A-Za-z'#\.&; \-]")
names = original_data[["pl_name"]]
names = names.values
extended = data_ext[data_ext["mpl_name"] == names[0][0]]
# extended = extended.reset_index()
# extended.at[0, "mpl_reflink"] = regex.sub("", extended.at[0, "mpl_reflink"])
# print(extended.loc[:, "mpl_reflink"])
# print(extended)
n = 1
# Gets all planets that are named in output.txt
while n < len(names):
extended = pandas.concat([extended, data_ext[data_ext["mpl_name"] == names[n][0]]])
n = n + 1
extended = extended.reset_index(drop=True)
# cols2 = ["mpl_reflink", "mpl_name", "mpl_orbper", "mpl_orbpererr1", "mpl_orbpererr2", "mpl_orbsmax", "mpl_orbsmaxerr1",
# "mpl_orbsmaxerr2", "mpl_orbeccen", "mpl_orbeccenerr1", "mpl_orbeccenerr2", "mpl_orbincl",
# "mpl_orbinclerr1", "mpl_orbinclerr2", "mpl_bmassj", "mpl_bmassjerr1", "mpl_bmassjerr2",
# "mpl_orbtper", "mpl_orbtpererr1", "mpl_orbtpererr2", "mpl_orblper", "mpl_orblpererr1",
# "mpl_orblpererr2"]
cols2 = ["mpl_reflink", "mpl_name", "mpl_orbper", "mpl_orbsmax",
"mpl_orbeccen", "mpl_orbincl", "mpl_bmassj", "mpl_orbtper", "mpl_orblper", "mpl_radj",
"mst_mass", "ref_author", "publication_year", "best_data"]
cols = list(extended.columns.values)
smax = cols.index("mpl_orbsmax")
per = cols.index("mpl_orbper")
eccen = cols.index("mpl_orbeccen")
periaps_time = cols.index("mpl_orbtper")
argument_periaps = cols.index("mpl_orblper")
pl_name = cols.index("mpl_name")
default_column = cols.index("mpl_def")
author = cols.index("mpl_reflink")
# extended = extended[cols]
length = len(extended.values)
num_cols = len(extended.columns)
cols.append("ref_author")
cols.append("publication_year")
cols.append("best_data")
n = 0
extended_arr = np.append(extended.values, np.zeros((length, 1)), axis=1)
extended_arr = np.append(extended_arr, np.zeros((length, 1)), axis=1)
# author_col = np.chararray((1, length))
author_col = list()
date_col = np.zeros(length)
while n < length:
# extended.at[n, "mpl_reflink"] = regex.sub("", extended.at[n, "mpl_reflink"])
# extended.at[n, "pub_year"] = regex_year.sub("", extended.at[n, "mpl_reflink"])
extended_arr[n][num_cols] = regex.sub("", extended_arr[n][author])
extended_arr[n][num_cols+1] = int(regex_year.sub("", extended_arr[n][num_cols]), 10)
# In order to catch issues where an extra number is added to the front of the year due to weird formatting
if extended_arr[n][num_cols+1] > 3000:
extended_arr[n][num_cols + 1] = extended_arr[n][num_cols+1] % 10000
author_col.append(regex.sub("", extended_arr[n][author]))
# print(int(regex_year.sub("", author_col[n], 10)))
date_col[n] = int(regex_year.sub("", author_col[n]), 10)
n = n + 1
n = 0
refs = extended["mpl_reflink"].values
best_data = np.zeros(length)
extended["ref_author"] = pandas.Series(author_col, index=extended.index)
extended["publication_year"] = pandas.Series(date_col, index=extended.index)
extended["best_data"] = pandas.Series(best_data, index=extended.index)
extended["mpl_orbperreflink"] = pandas.Series(refs, index=extended.index)
extended["mpl_orbsmaxreflink"] = pandas.Series(refs, index=extended.index)
extended["mpl_orbeccenreflink"] = pandas.Series(refs, index=extended.index)
extended["mpl_bmassreflink"] = pandas.Series(refs, index=extended.index)
extended["mpl_radreflink"] = pandas.Series(refs, index=extended.index)
extended["mst_massreflink"] = pandas.Series(refs, index=extended.index)
while n < len(names):
planet_rows = extended.loc[extended["mpl_name"] == names[n][0]]
print(names[n])
sorted_rows = planet_rows.sort_values(by=["publication_year"], axis=0, ascending=False)
good_idx = sorted_rows.index[0]
good_lvl = 0
for index, row in sorted_rows.iterrows():
base_need = (not pandas.isnull(row["mpl_orbsmax"]) or not pandas.isnull(row["mpl_orbper"])) and \
(not pandas.isnull(row["mpl_bmassj"]) or not pandas.isnull(row["mpl_radj"]))
# Has everything
if good_lvl < 4 and (base_need
and not pandas.isnull(row["mpl_orbeccen"]) and not pandas.isnull(row["mpl_orbtper"])
and not pandas.isnull(row["mpl_orblper"]) and not pandas.isnull(row["mpl_orbincl"])):
good_idx = index
good_lvl = 4
break
# Has everything except inclination
if good_lvl < 3 and (base_need
and not pandas.isnull(row["mpl_orbeccen"]) and not pandas.isnull(row["mpl_orbtper"])
and not pandas.isnull(row["mpl_orblper"])):
good_idx = index
good_lvl = 3
            # Has either periapsis time or argument of periapsis
elif good_lvl < 2 and (base_need
                                   and not pandas.isnull(row["mpl_orbeccen"])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent / 'PV_ICE' / 'TEMP' / 'ElectricFutures')
# Another option using a relative address; on some operating systems you might need '/' instead of '\'
# testfolder = os.path.abspath(r'..\..\PV_DEMICE\TEMP')
print ("Your simulation will be stored in %s" % testfolder)
# In[2]:
if not os.path.exists(testfolder):
os.makedirs(testfolder)
# In[3]:
MATERIALS = ['glass','silver','silicon', 'copper','aluminium_frames']
MATERIAL = MATERIALS[0]
MODULEBASELINE = r'..\..\baselines\ElectrificationFutures_2021\EF-CapacityByState-basecase.csv'
MODULEBASELINE_High = r'..\..\baselines\ElectrificationFutures_2021\EF-CapacityByState-LowREHighElec.csv'
# In[4]:
import PV_ICE
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# In[5]:
PV_ICE.__version__
# ### Loading Module Baseline. Will be used later to populate all the columns other than 'new_Installed_Capacity_[MW]', which will be supplied by the REEDS model
# In[6]:
r1 = PV_ICE.Simulation(name='Simulation1', path=testfolder)
#r1.createScenario(name='US', file=r'..\..\baselines\ReedsSubset\baseline_modules_US_Reeds_EF.csv')
r1.createScenario(name='US', file=r'..\..\baselines\ElectrificationFutures_2021\baseline_modules_US_NREL_Electrification_Futures_2021_basecase.csv')
baseline = r1.scenario['US'].data
baseline = baseline.drop(columns=['new_Installed_Capacity_[MW]'])
baseline.set_index('year', inplace=True)
baseline.index = pd.PeriodIndex(baseline.index, freq='A') # A -- Annual
baseline.head()
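# In[ ]:
# Illustrative only (not a cell from the original journal): a sketch of how a state-level
# new-capacity series like the ones built below could be joined with `baseline`, so every
# column other than 'new_Installed_Capacity_[MW]' comes from the module baseline.
# The names `A_example` and `module_example` are made up for this sketch.
A_example = pd.DataFrame({'new_Installed_Capacity_[MW]': [10.0, 12.0]},
                         index=pd.PeriodIndex(['2020', '2021'], freq='A'))
module_example = pd.concat([A_example, baseline.reindex(A_example.index)], axis=1)
module_example.head()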
# In[7]:
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 5)
# In[8]:
df = pd.read_csv(MODULEBASELINE)
df.set_index(['Type','State','year'], inplace=True)
df.head()
# In[9]:
for ii in range (len(df.unstack(level=2))):
STATE = df.unstack(level=2).iloc[ii].name[1]
SCEN = df.unstack(level=2).iloc[ii].name[0]
SCEN=SCEN.replace('+', '_')
filetitle = 'base_'+SCEN+'_'+STATE +'.csv'
subtestfolder = os.path.join(testfolder, 'baselines')
if not os.path.exists(subtestfolder):
os.makedirs(subtestfolder)
filetitle = os.path.join(subtestfolder, filetitle)
A = df.unstack(level=2).iloc[ii]
A = A.droplevel(level=0)
A.name = 'new_Installed_Capacity_[MW]'
A = pd.DataFrame(A)
A.index=pd.PeriodIndex(A.index, freq='A')
    A = pd.DataFrame(A)
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from inspect import signature
from io import StringIO
import os
from pathlib import Path
import sys
import numpy as np
import pytest
from pandas.compat import PY310
from pandas.errors import (
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
compat,
)
import pandas._testing as tm
from pandas.io.parsers import TextFileReader
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self) -> None:
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser.engine = "c"
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("squeeze", [True, False])
def test_squeeze(all_parsers, squeeze):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv_check_warnings(
FutureWarning,
"The squeeze argument has been deprecated "
"and will be removed in a future version. "
'Append .squeeze\\("columns"\\) to the call to squeeze.\n\n',
StringIO(data),
index_col=0,
header=None,
squeeze=squeeze,
)
if not squeeze:
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
@xfail_pyarrow
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@xfail_pyarrow
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa:E501
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
@xfail_pyarrow
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check to see that the response of parser when faced with no
# provided columns raises the correct error, with or without usecols.
("", {}, None),
("", {"usecols": ["X"]}, None),
(
",,",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
{
"header": None,
"delim_whitespace": True,
"skiprows": [0, 1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
{
"delim_whitespace": True,
"skiprows": [1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa:E501
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
def test_read_filepath_or_buffer(all_parsers):
# see gh-43366
parser = all_parsers
with pytest.raises(TypeError, match="Expected file path name or file-like"):
parser.read_csv(filepath_or_buffer=b"input")
@xfail_pyarrow
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
# Skip for now, actually only one test fails though, but its tricky to xfail
@skip_pyarrow
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with tm.ensure_clean(filename) as path:
df.to_csv(path, index=False)
result = parser.read_csv(path)
tm.assert_frame_equal(result, df)
def test_read_table_same_signature_as_read_csv(all_parsers):
# GH-34976
parser = all_parsers
table_sign = signature(parser.read_table)
csv_sign = signature(parser.read_csv)
assert table_sign.parameters.keys() == csv_sign.parameters.keys()
assert table_sign.return_annotation == csv_sign.return_annotation
for key, csv_param in csv_sign.parameters.items():
table_param = table_sign.parameters[key]
if key == "sep":
assert csv_param.default == ","
assert table_param.default == "\t"
assert table_param.annotation == csv_param.annotation
assert table_param.kind == csv_param.kind
continue
else:
assert table_param == csv_param
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(
PY310,
reason="GH41935 This test is leaking only on Python 3.10,"
"causing other tests to fail with a cryptic error.",
)
@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])
def test_read_csv_and_table_sys_setprofile(all_parsers, read_func):
# GH#41069
parser = all_parsers
data = "a b\n0 1"
sys.setprofile(lambda *a, **k: None)
result = getattr(parser, read_func)(StringIO(data))
sys.setprofile(None)
expected = DataFrame({"a b": ["0 1"]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_first_row_bom(all_parsers):
# see gh-26545
parser = all_parsers
data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_first_row_bom_unquoted(all_parsers):
# see gh-36343
parser = all_parsers
data = """\ufeffHead1\tHead2\tHead3"""
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", range(1, 6))
def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
# GH 28071
ref = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
columns=list("ab"),
)
csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
parser = all_parsers
df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
tm.assert_frame_equal(df, ref[:nrows])
@xfail_pyarrow
def test_no_header_two_extra_columns(all_parsers):
# GH 26218
column_names = ["one", "two", "three"]
ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
stream = StringIO("foo,bar,baz,bam,blah")
parser = all_parsers
with tm.assert_produces_warning(ParserWarning):
df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
tm.assert_frame_equal(df, ref)
def test_read_csv_names_not_accepting_sets(all_parsers):
# GH 34946
data = """\
1,2,3
4,5,6\n"""
parser = all_parsers
with pytest.raises(ValueError, match="Names should be an ordered collection."):
parser.read_csv(StringIO(data), names=set("QAZ"))
@xfail_pyarrow
def test_read_table_delim_whitespace_default_sep(all_parsers):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
result = parser.read_table(f, delim_whitespace=True)
expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
def test_read_csv_delimiter_and_sep_no_default(all_parsers):
# GH#39823
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = "Specified a sep and a delimiter; you can only specify one."
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, sep=" ", delimiter=".")
@pytest.mark.parametrize("kwargs", [{"delimiter": "\n"}, {"sep": "\n"}])
def test_read_csv_line_break_as_separator(kwargs, all_parsers):
# GH#43528
parser = all_parsers
data = """a,b,c
1,2,3
"""
msg = (
r"Specified \\n as separator or delimiter. This forces the python engine "
r"which does not accept a line terminator. Hence it is not allowed to use "
r"the line terminator as separator."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_read_csv_posargs_deprecation(all_parsers):
# GH 41485
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = (
"In a future version of pandas all arguments of read_csv "
"except for the argument 'filepath_or_buffer' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
parser.read_csv(f, " ")
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
@pytest.mark.parametrize("func", ["read_csv", "read_table"])
def test_names_and_prefix_not_None_raises(all_parsers, func):
# GH#39123
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = "Specified named and prefix; you can only specify one."
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning):
getattr(parser, func)(f, names=["a", "b"], prefix="x")
@pytest.mark.parametrize("func", ["read_csv", "read_table"])
@pytest.mark.parametrize("prefix, names", [(None, ["x0", "x1"]), ("x", None)])
def test_names_and_prefix_explicit_None(all_parsers, names, prefix, func):
# GH42387
f = StringIO("a,b\n1,2")
    expected = DataFrame({"x0": ["a", "1"], "x1": ["b", "2"]})
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import modin.pandas as pd
from modin.utils import try_cast_to_pandas
import pandas
import datetime
import numpy as np
from pandas.api.types import is_datetime64_any_dtype
import pyarrow as pa
from modin.pandas.test.utils import (
df_equals,
io_ops_bad_exc,
eval_io as general_eval_io,
)
from modin.experimental.core.execution.native.implementations.omnisci_on_native.omnisci_worker import (
OmnisciServer,
)
def eval_io(
fn_name,
comparator=df_equals,
cast_to_str=False,
check_exception_type=True,
raising_exceptions=io_ops_bad_exc,
check_kwargs_callable=True,
modin_warning=None,
md_extra_kwargs=None,
*args,
**kwargs,
):
"""
Evaluate I/O operation and do equality check after importing Modin's data to OmniSci.
Notes
-----
For parameters description please refer to ``modin.pandas.test.utils.eval_io``.
"""
def omnisci_comparator(df1, df2):
"""Evaluate equality comparison of the passed frames after importing the Modin's one to OmniSci."""
with ForceOmnisciImport(df1, df2):
# Aligning DateTime dtypes because of the bug related to the `parse_dates` parameter:
# https://github.com/modin-project/modin/issues/3485
df1, df2 = align_datetime_dtypes(df1, df2)
comparator(df1, df2)
general_eval_io(
fn_name,
comparator=omnisci_comparator,
cast_to_str=cast_to_str,
check_exception_type=check_exception_type,
raising_exceptions=raising_exceptions,
check_kwargs_callable=check_kwargs_callable,
modin_warning=modin_warning,
md_extra_kwargs=md_extra_kwargs,
*args,
**kwargs,
)
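# Illustrative only: a minimal sketch of how the ``eval_io`` wrapper above is typically
# called from a test module. The file name "example.csv" and the ``names`` kwarg are
# assumptions made for this sketch, not values taken from this module.
def _example_eval_io_read_csv(csv_path="example.csv"):
    # Runs modin.pandas.read_csv and pandas.read_csv with the same arguments, imports the
    # Modin result into OmniSci, aligns DateTime dtypes, then compares the two frames.
    eval_io(
        fn_name="read_csv",
        filepath_or_buffer=csv_path,
        names=["a", "b", "c"],
    )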
def align_datetime_dtypes(*dfs):
"""
Make all of the passed frames have DateTime dtype for the same columns.
Cast column type of the certain frame to the DateTime type if any frame in
the `dfs` sequence has DateTime type for this column.
Parameters
----------
*dfs : iterable of DataFrames
DataFrames to align DateTime dtypes.
Notes
-----
Passed Modin frames may be casted to pandas in the result.
"""
datetime_cols = {}
time_cols = set()
for df in dfs:
for col, dtype in df.dtypes.items():
# If we already decided to cast this column to DateTime no more actions are needed
if col not in datetime_cols and is_datetime64_any_dtype(dtype):
datetime_cols[col] = dtype
# datetime.time is considered to be an 'object' dtype in pandas that's why
# we have to explicitly check the values type in the column
elif (
dtype == np.dtype("O")
and col not in time_cols
# OmniSci has difficulties with empty frames, so explicitly skip them
# https://github.com/modin-project/modin/issues/3428
and len(df) > 0
and all(
isinstance(val, datetime.time) or pandas.isna(val)
for val in df[col]
)
):
time_cols.add(col)
if len(datetime_cols) == 0 and len(time_cols) == 0:
return dfs
def convert_to_time(value):
"""Convert passed value to `datetime.time`."""
if isinstance(value, datetime.time):
return value
elif isinstance(value, str):
return datetime.time.fromisoformat(value)
else:
return datetime.time(value)
time_cols_list = list(time_cols)
casted_dfs = []
for df in dfs:
# OmniSci has difficulties with casting to certain dtypes (i.e. datetime64),
# so casting it to pandas
pandas_df = try_cast_to_pandas(df)
if datetime_cols:
pandas_df = pandas_df.astype(datetime_cols)
if time_cols:
pandas_df[time_cols_list] = pandas_df[time_cols_list].applymap(
convert_to_time
)
casted_dfs.append(pandas_df)
return casted_dfs
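# Illustrative only: a small self-contained sketch of what ``align_datetime_dtypes`` does.
# The frames and the column name "when" are made up for this example.
def _example_align_datetime_dtypes():
    df_str = pandas.DataFrame({"when": ["2021-01-01", "2021-01-02"]})
    df_dt = pandas.DataFrame({"when": pandas.to_datetime(["2021-01-01", "2021-01-02"])})
    aligned_str, aligned_dt = align_datetime_dtypes(df_str, df_dt)
    # Because one input has a datetime64 "when" column, the other frame is cast to match.
    assert is_datetime64_any_dtype(aligned_str["when"])
    assert is_datetime64_any_dtype(aligned_dt["when"])
    return aligned_str, aligned_dt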
class ForceOmnisciImport:
"""
Trigger import execution for Modin DataFrames obtained by OmniSci engine if already not.
When using as a context class also cleans up imported tables at the end of the context.
Parameters
----------
*dfs : iterable
DataFrames to trigger import.
"""
def __init__(self, *dfs):
self._imported_frames = []
for df in dfs:
if not isinstance(df, (pd.DataFrame, pd.Series)):
continue
df.shape # to trigger real execution
if df.empty:
continue
partition = df._query_compiler._modin_frame._partitions[0][0]
if partition.frame_id is not None:
continue
frame = partition.get()
if isinstance(frame, (pandas.DataFrame, pandas.Series)):
frame_id = OmnisciServer().put_pandas_to_omnisci(frame)
elif isinstance(frame, pa.Table):
frame_id = OmnisciServer().put_arrow_to_omnisci(frame)
else:
raise TypeError(
f"Unexpected storage format, expected pandas.DataFrame or pyarrow.Table, got: {type(frame)}."
)
partition.frame_id = frame_id
self._imported_frames.append((df, frame_id))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for df, frame_id in self._imported_frames:
actual_frame_id = df._query_compiler._modin_frame._partitions[0][0].frame_id
OmnisciServer().executeDDL(f"DROP TABLE IF EXISTS {frame_id}")
if actual_frame_id == frame_id:
df._query_compiler._modin_frame._partitions[0][0].frame_id = None
self._imported_frames = []
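# Illustrative only: a hedged sketch of ``ForceOmnisciImport`` used as a context manager.
# The frame contents are arbitrary; non-Modin objects are skipped by the importer, while
# the Modin frame gets a backing OmniSci table that is dropped again on exit.
def _example_force_omnisci_import():
    md_df = pd.DataFrame({"a": [1, 2, 3]})
    pd_df = pandas.DataFrame({"a": [1, 2, 3]})
    with ForceOmnisciImport(md_df, pd_df):
        df_equals(md_df, pd_df)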
def set_execution_mode(frame, mode, recursive=False):
"""
Enable execution mode assertions for the passed frame.
Enabled execution mode checks mean, that the frame raises an AssertionError
if the execution flow is out of the scope of the selected mode.
Parameters
----------
frame : DataFrame or Series
Modin frame to set execution mode at.
mode : {None, "lazy", "arrow"}
Execution mode to set:
- "lazy": only delayed computations.
- "arrow": only computations via Pyarrow.
- None: allow any type of computations.
recursive : bool, default: False
Whether to set the specified execution mode for every frame
in the delayed computation tree.
"""
if isinstance(frame, (pd.Series, pd.DataFrame)):
frame = frame._query_compiler._modin_frame
frame._force_execution_mode = mode
if recursive and hasattr(frame._op, "input"):
for child in frame._op.input:
set_execution_mode(child, mode, True)
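# Illustrative only: a minimal sketch of ``set_execution_mode``. While a frame is in
# "lazy" mode, building expressions is allowed but materializing them is expected to
# raise an AssertionError, so the mode is reset to None before triggering execution.
def _example_set_execution_mode():
    df = pd.DataFrame({"a": [1, 2, 3]})
    set_execution_mode(df, "lazy")
    deferred = df + 1                    # only builds the lazy computation tree
    set_execution_mode(deferred, None)   # allow real execution again
    return deferred.shape                # safe to materialize now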
def run_and_compare(
fn,
data,
data2=None,
force_lazy=True,
force_arrow_execute=False,
allow_subqueries=False,
comparator=df_equals,
**kwargs,
):
"""Verify equality of the results of the passed function executed against pandas and modin frame."""
def run_modin(
fn,
data,
data2,
force_lazy,
force_arrow_execute,
allow_subqueries,
constructor_kwargs,
**kwargs,
):
kwargs["df1"] = pd.DataFrame(data, **constructor_kwargs)
kwargs["df2"] = pd.DataFrame(data2, **constructor_kwargs)
kwargs["df"] = kwargs["df1"]
if force_lazy:
set_execution_mode(kwargs["df1"], "lazy")
set_execution_mode(kwargs["df2"], "lazy")
elif force_arrow_execute:
set_execution_mode(kwargs["df1"], "arrow")
set_execution_mode(kwargs["df2"], "arrow")
exp_res = fn(lib=pd, **kwargs)
if force_arrow_execute:
set_execution_mode(exp_res, "arrow", allow_subqueries)
elif force_lazy:
set_execution_mode(exp_res, None, allow_subqueries)
return exp_res
constructor_kwargs = kwargs.pop("constructor_kwargs", {})
try:
kwargs["df1"] = pandas.DataFrame(data, **constructor_kwargs)
kwargs["df2"] = | pandas.DataFrame(data2, **constructor_kwargs) | pandas.DataFrame |
import argparse
import json
import os
from os import listdir
from os.path import isfile
import shutil
from genson import SchemaBuilder
from enum import Enum
import copy
import flatdict
import pandas as pd
import numpy as np
from collections import OrderedDict
from functools import reduce # forward compatibility for Python 3
import operator
import sys
from echr.utils.folders import make_build_folder
from echr.utils.cli import TAB
from echr.utils.logger import getlogger
from rich.markdown import Markdown
from rich.console import Console
log = getlogger()
__console = Console(record=True)
DELIMITER = '.'
type_priority = OrderedDict([
('number', float),
('integer', int),
('string', str)
])
class COL_HINT(str, Enum):
HOT_ONE = 'hot_one'
POSITIONAL = 'positional'
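# Illustrative only: the shape of a ``schema_hints`` mapping consumed by ``flatten_dataset``
# and ``normalize`` below. Keys are flattened field paths; the field names used here
# ("conclusion", "parties") are examples, not a fixed schema.
EXAMPLE_SCHEMA_HINTS = {
    'conclusion': {'col_type': COL_HINT.HOT_ONE},                 # one indicator column per value
    'parties': {'col_type': COL_HINT.POSITIONAL, 'sort': True},   # keep positions, sorting items first
}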
def format_structured_json(cases_list):
res = []
representents = {}
extractedapp = {}
scl = {}
decision_body = {}
for name in cases_list:
with open(name, 'r') as f:
c = json.load(f)
c['representedby'] = [r for r in c['representedby'] if r != 'N/A']
representents[c['appno']] = {'representedby': c['representedby']}
extractedapp[c['appno']] = {'appnos': c['extractedappno']}
decision_body[c['appno']] = {
'name': [e['name'] for e in c['decision_body']],
'role': {e['name']: e['role'] for e in c['decision_body'] if 'role' in e}
}
scl[c['appno']] = {'scl': c['scl']}
c['respondent'] = c['respondent'].split(';') #
c['applicability'] = c['applicability'].strip().split(';')
c['appno'] = c['appno'].split(';')[0]
c['decisiondate'] = c['decisiondate'].split(' ')[0]
c['judgementdate'] = c['judgementdate'].split(' ')[0]
c['introductiondate'] = c['introductiondate'].split(' ')[0]
c['kpdate'] = c['kpdate'].split(' ')[0]
c['separateopinion'] = True if c['separateopinion'] == 'TRUE' else False
del c['representedby']
del c['extractedappno']
del c['decision_body']
del c['scl']
del c['documents']
del c['content']
del c['externalsources']
del c['kpthesaurus']
del c['__conclusion']
del c['__articles']
if not len(c['issue']):
del c['issue']
else:
c['issue'] = sorted(c['issue'])
if not len(c['applicability']):
del c['applicability']
res.append(c)
return res, representents, extractedapp, scl, decision_body
def get_by_path(root, items):
return reduce(operator.getitem, items, root)
def set_by_path(root, items, value):
get_by_path(root, items[:-1])[items[-1]] = value
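# Illustrative only: a tiny worked example of the two path helpers above on a nested dict.
def _example_path_helpers():
    doc = {'a': {'b': {'c': 1}}}
    assert get_by_path(doc, ['a', 'b', 'c']) == 1
    set_by_path(doc, ['a', 'b', 'c'], 2)   # walks to doc['a']['b'], then assigns key 'c'
    assert doc['a']['b']['c'] == 2
    return doc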
def determine_schema(X):
builder = SchemaBuilder()
for x in X:
builder.add_object(x)
schema = builder
return schema
def get_flat_type_mapping(flat_schema):
flat_type_mapping = {}
for k in flat_schema.keys():
if k.endswith(DELIMITER + 'type'):
key = k.replace('properties' + DELIMITER, '').replace(DELIMITER + 'type', '')
flat_type_mapping[key] = flat_schema[k]
return flat_type_mapping
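# Illustrative only: what ``get_flat_type_mapping`` yields for a tiny generated schema.
# The sample document is made up; real inputs are the structured case dicts built above.
def _example_get_flat_type_mapping():
    schema = determine_schema([{'appno': '123/45', 'articles': ['3', '6']}])
    flat_schema = flatdict.FlatDict(schema.to_schema(), delimiter=DELIMITER)
    # Expected mapping, roughly: {'appno': 'string', 'articles': 'array', 'articles.items': 'string'}
    return get_flat_type_mapping(flat_schema)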
def get_flat_domain_mapping(X, flat_type_mapping):
flat_domain_mapping = {}
for x in X:
flat = flatdict.FlatterDict(x, delimiter='.')
for k in flat_type_mapping.keys():
v = flat.get(k)
if v is not None:
if k not in flat_domain_mapping:
flat_domain_mapping[k] = set()
type_ = flat_type_mapping[k]
try:
if type_ == 'array':
flat_domain_mapping[k].update(get_by_path(x, k.split('.')))
else:
flat_domain_mapping[k].add(get_by_path(x, k.split('.')))
except:
if not flat_domain_mapping[k]:
del flat_domain_mapping[k]
for k in flat_domain_mapping:
flat_domain_mapping[k] = list(flat_domain_mapping[k])
return flat_domain_mapping
def flatten_dataset(X, flat_type_mapping, schema_hints=None):
if schema_hints is None:
schema_hints = {}
flat_X = []
for x in X:
flat = flatdict.FlatterDict(x, delimiter=DELIMITER)
c_x = copy.deepcopy(x)
for k in flat_type_mapping.keys():
col_type = schema_hints.get(k, {}).get('col_type')
if col_type not in [None, COL_HINT.POSITIONAL]:
continue
v = flat.get(k)
if v is not None:
sort = schema_hints.get(k, {}).get('sort', False)
if sort:
type_ = flat_type_mapping[k]
if type_ == 'array':
item_types = flat_type_mapping.get(k + '.items')
a = get_by_path(c_x, k.split('.'))
if isinstance(item_types, list):
try:
a = sorted(a)
except:
print('# Warning: mix-type array with types: {}'.format(', '.join(item_types)))
                            print('# Warning: no comparison operator provided. Trying to assess the proper cast...')
for t in type_priority:
try:
a = list(map(type_priority[t], a))
print('# Casting \'{}\' to {}'.format(k, t))
break
except:
log.error('Could not cast \'{}\' to {}'.format(k, t))
else:
print('# Error: Could not find any way to sort {}'.format(k))
raise Exception('Could not find any way to sort {}'.format(k))
set_by_path(c_x, k.split('.'), sorted(a))
flat = flatdict.FlatterDict(c_x, delimiter=DELIMITER)
flat_X.append(flat)
return flat_X
def hot_one_encoder_on_list(df, column):
v = [x if isinstance(x, list) else [] for x in df[column].values]
l = [len(x) for x in v]
f, u = pd.factorize(np.concatenate(v))
n, m = len(v), u.size
i = np.arange(n).repeat(l)
dummies = pd.DataFrame(
np.bincount(i * m + f, minlength=n * m).reshape(n, m),
df.index, map(lambda x: str(column) + '=' + str(x), u)
)
    return df.drop(columns=[column]).join(dummies)
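# End-to-end normalisation: infer the schema of X, flatten each record, sort
# arrays according to schema_hints, then encode the columns hinted as
# COL_HINT.HOT_ONE. The 'conclusion' field gets a dedicated
# 'ccl_article=<article>' encoding: 1 for a violation, -1 for any other
# conclusion type, 0 when the article is absent from the case.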
def normalize(X, schema_hints=None):
if schema_hints is None:
schema_hints = {}
def hot_one_encoder(df, columns):
return pd.get_dummies(df, prefix_sep="=", columns=columns)
schema = determine_schema(X)
flat_schema = flatdict.FlatDict(schema.to_schema(), delimiter=DELIMITER)
flat_type_mapping = get_flat_type_mapping(flat_schema)
flat_domain_mapping = get_flat_domain_mapping(X, flat_type_mapping)
flat_X = flatten_dataset(X, flat_type_mapping, schema_hints)
columns_to_encode = [k for k, v in schema_hints.items() if v['col_type'] == COL_HINT.HOT_ONE]
df = pd.DataFrame(flat_X)
for c in df.columns:
f = next((k for k in columns_to_encode if c.startswith(k)), None)
if f:
            df = df.drop(columns=[c])
encoded = []
for c in columns_to_encode:
type_ = flat_type_mapping[c]
if type_ == 'array':
if c == 'conclusion':
articles = set()
for x in X:
for e in x[c]:
if 'article' in e:
articles.add(e['article'])
articles = sorted(articles)
df2 = []
for x in X:
e = []
xart = {v['article']: v['type'] for v in x['conclusion'] if 'article' in v}
for a in articles:
v = 0
if a in xart:
if xart[a] == 'violation':
v = 1
else:
v = -1
e.append(v)
df2.append(e)
df2 = pd.DataFrame(df2, columns=list(map(lambda x: 'ccl_article={}'.format(x), articles)))
encoded.append(df2)
else:
                df2 = pd.DataFrame(X)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # TODO: the reversed (scalar-on-the-left) comparisons below are not exercised yet:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & | Series([]) | pandas.Series |
import logging
from pathlib import Path
import re
import scipy.stats as ss
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.stats.multitest as multitest
import sklearn.metrics
from intermine.webservice import Service
import biclust_comp.utils as utils
def plot_sample_enrichment_impc(X_file, max_factors=None, max_traits=None):
sample_info = read_sample_info_IMPC("data/real/IMPC/sample_info.txt")
X = utils.read_matrix_tsv(X_file)
trait_dummies = pd.get_dummies(sample_info[['tissue', 'genotype']])
return plot_enrichment(trait_dummies, X, max_factors, max_traits)
def plot_pathway_enrichment(B_file, gene_ensembl_ids_file,
full_pathways_file="analysis/IMPC/full_pathways.tsv",
max_factors=None, max_pathways=None):
with open(gene_ensembl_ids_file) as f:
gene_ensembl_ids = [line.strip() for line in f.readlines()]
B = pd.read_csv(B_file, sep="\t")
full_pathways_df = pd.read_csv(full_pathways_file, sep="\t")
pathways_df = construct_pathways_df(gene_ensembl_ids, full_pathways_df)
return plot_enrichment(pathways_df, B, max_factors, max_pathways)
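# Query MouseMine for the pathways associated with each gene knocked out in
# the IMPC genotypes; returns a gene x pathway count DataFrame together with a
# pathway identifier -> pathway name lookup.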
def construct_ko_pathways_df():
sample_info = read_sample_info_IMPC("data/real/IMPC/sample_info.txt")
service = Service("http://www.mousemine.org/mousemine/service")
knocked_out_genes = []
for genotype in sample_info.genotype.unique():
match = re.match(r"(.*) knockout", genotype)
if match:
knocked_out_genes.append(match[1])
ko_genes_pathways = {}
pathway_names_dict = {}
for knocked_out_gene in knocked_out_genes:
query = service.new_query("ProteinCodingGene")
query.add_view("pathways.identifier", "pathways.name", "symbol")
query.add_constraint("symbol", "=", knocked_out_gene)
pathways = [f"{row['pathways.name']}_-_{row['pathways.identifier']}" for row in query.rows()]
ko_genes_pathways[knocked_out_gene] = pathways
for row in query.rows():
pathway_names_dict[row["pathways.identifier"]] = row["pathways.name"]
ko_genes_pathways_df = utils.transform_dict_to_count_df(ko_genes_pathways)
return ko_genes_pathways_df, pathway_names_dict
def construct_full_pathways_df(pathways):
service = Service("http://www.mousemine.org/mousemine/service")
pathways_dict = {}
for pathway in pathways:
query = service.new_query("Pathway")
query.add_view(
"genes.primaryIdentifier", "genes.symbol", "genes.name",
"genes.sequenceOntologyTerm.name", "genes.chromosome.primaryIdentifier"
)
query.add_constraint("identifier", "=", pathway)
pathways_dict[pathway] = [row["genes.primaryIdentifier"]
for row in query.rows()]
pathways_df = utils.transform_dict_to_count_df(pathways_dict).T
return pathways_df
def construct_pathways_df(gene_ensembl_ids, full_pathways_df,
ensembl_to_mgi_file="analysis/mart_export.txt"):
ensembl_to_mgi = pd.read_csv(ensembl_to_mgi_file,
sep="\t",
index_col=0)
pathways_df = pd.DataFrame(index=gene_ensembl_ids,
columns=full_pathways_df.columns,
dtype=int,
data=0)
for ensembl_id in gene_ensembl_ids:
unversioned_id = ensembl_id.split('.')[0]
try:
mgi_id = ensembl_to_mgi.loc[unversioned_id, 'MGI ID']
if isinstance(mgi_id, str) and mgi_id.startswith('MGI'):
pass
else:
raise KeyError
except KeyError as e:
print(f"Unable to translate ID {ensembl_id}")
try:
pathways_df.loc[ensembl_id, :] = full_pathways_df.loc[mgi_id, :]
except KeyError as e:
print(f"MGI ID not found in pathways matrix {mgi_id}")
return pathways_df
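# Heatmap of factor/trait enrichment: cells are coloured by F1 score and
# annotated with the size of the factor/trait intersection; an extra 'total'
# row and column carry the overall trait and factor sizes.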
def plot_enrichment(trait_df, factor_df, max_factors, max_traits):
    f1_scores, intersections, _fisher_pvals, _odds_ratios = calculate_trait_enrichment(factor_df, trait_df)
if max_factors:
num_factors = min(factor_df.shape[1], max_factors)
else:
num_factors = factor_df.shape[1]
if max_traits:
num_traits = min(trait_df.shape[1], max_traits)
else:
num_traits = trait_df.shape[1]
# Sort the columns and rows by maximum f1 score, so that the factors with
# best enrichment will be left-most in the chart, and traits with best
# enrichment will be highest in the chart
ordered_columns = sorted(list(f1_scores.columns),
key=lambda k: f1_scores.iloc[:, k].max(),
reverse=True)
ordered_rows = sorted(list(f1_scores.index),
key=lambda row: f1_scores.loc[row, :].max(),
reverse=True)
intersections.loc['total', :] = (factor_df != 0).sum()
f1_scores.loc['total', :] = 0
ordered_rows.insert(0, 'total')
ordered_intersections = intersections.loc[ordered_rows, ordered_columns]
ordered_intersections.insert(0, 'total', trait_df.sum())
ordered_f1_scores = f1_scores.loc[ordered_rows, ordered_columns]
ordered_f1_scores.insert(0, 'total', 0)
fig, ax = plt.subplots(figsize=(num_factors * 0.7 + 3,
num_traits * 0.7))
# Colour each square by the F1 score
plt.imshow(ordered_f1_scores.iloc[:num_traits + 1, :num_factors + 1],
aspect='auto',
cmap='Blues')
# Sort out axis labels
ax.set_yticks(np.arange(num_traits + 1))
ax.set_xticks(np.arange(num_factors + 1))
ax.set_yticklabels(ordered_f1_scores.index)
ax.set_xticklabels(ordered_f1_scores.columns)
# Add text that notes the number of samples in intersection of trait and factor
threshold_black = 0.5
for j in range(num_factors + 1):
for i in range(num_traits + 1):
value = ordered_intersections.iloc[i, j]
opacity = ordered_f1_scores.iloc[i, j]
if opacity < threshold_black and value != 0:
color="black"
else:
color="white"
text = ax.text(j, i, value,
ha="center", va="center", color=color)
plt.axvline(x=0.5, color='black')
plt.axhline(y=0.5, color='black')
plt.colorbar()
fig.tight_layout()
plt.show()
return ordered_f1_scores, ordered_intersections
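# For every (trait, factor) pair, compute the F1 score of the factor's support
# against the trait, the intersection size, a hypergeometric enrichment
# p-value corrected for multiple testing, and the odds ratio. Returned in that
# order as four DataFrames indexed by trait and columned by factor.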
def calculate_trait_enrichment(factor_df, trait_df):
f1_scores = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
fisher_pvals = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
odds_ratios = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
intersections = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=int)
for trait_name, trait_column in trait_df.items():
for factor_index, factor_column in factor_df.items():
total_from_trait = trait_column.sum()
total_population = len(trait_column)
factor_size = (factor_column != 0).sum()
trait_non_zero = np.where(trait_column)[0]
intersection_size = ((factor_column.iloc[trait_non_zero]) != 0).sum()
trait_size = trait_column.sum()
intersections.loc[trait_name, factor_index] = intersection_size
f1_scores.loc[trait_name, factor_index] = sklearn.metrics.f1_score(trait_column,
factor_column != 0)
# sf is the 'survival' function i.e. 1 - cdf
# So we are finding the probability that the intersection size is at least
# equal to the intersection size we have observed, under the assumption that this
# has Hypergeometric distribution with M=total_population, n=trait_size and N=factor_size
# where M is 'total number of objects in the bin', N is 'number of objects we pick'
# n is 'total number of objects which are successes' and
# m is 'number of objects we pick which are successes'
fisher_pvals.loc[trait_name, factor_index] = ss.hypergeom.sf(intersection_size - 1,
total_population,
trait_size,
factor_size)
odds_in_factor = intersection_size / (factor_size - intersection_size)
notfactor_nottrait = total_population - trait_size - factor_size + intersection_size
odds_out_of_factor = (trait_size - intersection_size) / notfactor_nottrait
odds_ratios.loc[trait_name, factor_index] = odds_in_factor / odds_out_of_factor
_reject, corrected_fisher_pvals = utils.correct_multiple_testing(fisher_pvals)
return f1_scores, intersections, corrected_fisher_pvals, odds_ratios
def summarise_enrichment(sort_measure_name, measures_dict, factor_df, trait_df):
trait_enrichment_dicts = []
sort_measure_df = measures_dict[sort_measure_name]
for trait in sort_measure_df.index:
best_factor = sort_measure_df.loc[trait, :].argmax()
trait_enrichment_dict = {'trait': trait,
'best factor (by F1 score)': best_factor,
'factor size': (factor_df.loc[:, best_factor] != 0).sum(),
'trait size': (trait_df.loc[:, trait] != 0).sum()}
for measure, measure_df in measures_dict.items():
trait_enrichment_dict[measure] = measure_df.loc[trait, best_factor]
trait_enrichment_dicts.append(trait_enrichment_dict)
    return pd.DataFrame(trait_enrichment_dicts)
# Pre-defined lists
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
# Import pandas as pd
import pandas as pd
# Create dictionary my_dict with three key:value pairs: my_dict
my_dict = {'country':names,'drives_right':dr,'cars_per_cap':cpc}
# Build a DataFrame cars from my_dict: cars
cars = pd.DataFrame(my_dict)
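# For reference, cars now holds one row per country with the columns
# 'country', 'drives_right' and 'cars_per_cap'; print(cars) displays it.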
"""
Defines the sample class which is used to validate all the input data provided
and will contain the information required to implement the Crank-Nicolson
scheme.
"""
import numpy as np
import sys
import pandas as pd
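# A minimal usage sketch (assuming a problem_description dict that carries the
# keys validated below, e.g. "material", "problem_type", "properties_type",
# "depth", "x_divisions", "time_total", the *_coeff tuples and the
# boundary-condition entries):
#
#     sample = solid_sample(problem_description)
#     sample.validate_input(problem_description)
#     sample.assign_properties(problem_description)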
class solid_sample():
"""
Contains all the geometrical and thermophysical properties of the sample
as well as the description of the thermal environment. Additionally,
validates input.
"""
def __init__(self, problem_description):
"""initiliazes the class"""
self.material = problem_description["material"]
def validate_input(self, problem_description):
"""validates the input provided by the user"""
# validate material name
if problem_description["material"] is None:
print("Error, material not valid")
sys.exit(1)
# validate problem type
if problem_description["problem_type"] not in ["direct", "inverse"]:
print("Error, problem type not valid")
sys.exit(1)
# validate properties type
if problem_description["properties_type"] not in [
"constant", "temperature_dependent"]:
print("Error, properties type not valid")
sys.exit(1)
# validate numerical input
exit_value = False
for property_name in ["depth", "x_divisions", "time_total",
"temperature_ambient", "temperature_initial",
]:
# don't check temperature initial as a float if it is an array
if (property_name == "temperature_initial") and (
isinstance(problem_description[property_name],
np.ndarray)):
continue
try:
float(problem_description[property_name])
except (ValueError, TypeError):
print(f"Error, {property_name} not valid")
exit_value = True
if exit_value:
sys.exit(1)
# validate numerical input for thermal properties
for property_name in ["conductivity_coeff", "density_coeff",
"heat_capacity_coeff"]:
# validate the base value
try:
float(problem_description[property_name][0])
except (ValueError, TypeError):
print(f"Error, base {property_name.split('_')[0]} not valid")
exit_value = True
# validate the exponent
if problem_description["properties_type"] == "constant":
if problem_description[property_name][1] is not None:
print("Error, exponent for constant "
f"{property_name.split('_')[0]} not valid")
exit_value = True
elif problem_description[
"properties_type"] == "temperature_dependent":
try:
float(problem_description[property_name][1])
except (ValueError, TypeError):
print("Error, exponent for temperature dependent "
f"{property_name.split('_')[0]} not valid")
exit_value = True
if exit_value:
sys.exit(1)
# validate initial temperature if array
if isinstance(problem_description["temperature_initial"], np.ndarray):
if not problem_description["x_divisions"] == len(
problem_description["temperature_initial"]):
print("Error, size of the initial temperature array not valid")
sys.exit(1)
# validate the surface boundary condition
if problem_description["boundcond_surface"] not in [
"dirichlet", "neunman", "robin"]:
print("Error, surface boundary condition not valid")
sys.exit(1)
exit_value = False
# dirichlet
if problem_description["boundcond_surface"] == "dirichlet":
try:
float(problem_description["temperature_surface"])
except (ValueError, TypeError):
print("Error, surface temperature for dirichlet boundary "
"condition not valid")
exit_value = True
# neunman
elif problem_description["boundcond_surface"] == "neunman":
try:
float(problem_description["nhf"])
except (ValueError, TypeError):
print("Error, net heat flux for neunman boundary condition"
" not valid")
exit_value = True
# robin
elif problem_description["boundcond_surface"] == "robin":
if problem_description["ihf_type"] not in ["constant",
"polynomial",
"sinusoidal"]:
print("Error, ihf type not valid")
sys.exit(1)
# constant ihf
elif problem_description["ihf_type"] == "constant":
try:
float(problem_description["ihf_coefficients"])
except (ValueError, TypeError):
print("Error, ihf coefficients not valid for constant ihf")
exit_value = True
# polynomial ihf
elif problem_description["ihf_type"] == "polynomial":
try:
np.array(problem_description["ihf_coefficients"],
dtype=float)
except (ValueError, TypeError):
print("Error, ihf coefficients not valid for polynomial"
" ihf")
exit_value = True
# sinusoidal ihf
elif problem_description["ihf_type"] == "sinusoidal":
if len(problem_description["ihf_coefficients"]) != 3:
print("Error, ihf coefficients not valid for sinusoidal"
" ihf")
sys.exit(1)
try:
np.array(problem_description["ihf_coefficients"],
dtype=float)
except (ValueError, TypeError):
print("Error, ihf coefficients not valid for sinusoidal"
" ihf")
exit_value = True
# surface heat losses
if problem_description[
"surface_losses_type"] not in ["linear", "non-linear"]:
print("Error, surface losses not valid")
sys.exit(1)
# linear surface losses
elif problem_description["surface_losses_type"] == "linear":
try:
float(problem_description["h_total"])
except (ValueError, TypeError):
print("Error, total heat transfer coefficient not valid")
exit_value = True
# non-linear surface losses
elif problem_description["surface_losses_type"] == "non-linear":
for property_name in ["h_convective", "absorptivity",
"emissivity"]:
try:
float(problem_description[property_name])
except (ValueError, TypeError):
print(f"Error, {property_name} not valid")
exit_value = True
# back face boundary condition
if problem_description["boundcond_back"] not in ["insulated",
"conductive_losses"]:
print("Error, back face boundary condition not valid")
sys.exit(1)
elif problem_description["boundcond_back"] == "conductive_losses":
try:
float(problem_description["conductivity_subs"])
except (ValueError, TypeError):
print("Error, conductivity of substrate material not valid")
exit_value = True
if exit_value:
sys.exit(1)
# pyrolysis
exit_value = False
if problem_description["material_type"] not in ["inert", "reactive"]:
print("Error, material type not valid")
sys.exit(1)
elif problem_description["material_type"] == "reactive":
for property_name in ["pre_exp_factor", "activation_energy",
"heat_reaction", "reaction_order"]:
try:
float(problem_description[property_name])
except (ValueError, TypeError):
print(f"Error, {property_name} not valid")
exit_value = True
if exit_value:
sys.exit(1)
# in-depth absorption
try:
float(problem_description["in-depth_absorptivity"])
except (ValueError, TypeError):
print("Error, in-depth absorptivity not valid")
exit_value = True
if exit_value:
sys.exit(1)
def assign_properties(self, problem_description):
"""Assigns properties given by the user to the sample class and
calculates additional parameters"""
# geometry
# -------
self.depth = problem_description["depth"]
self.x_divisions = problem_description["x_divisions"]
self.space_mesh = np.linspace(0, self.depth, self.x_divisions)
self.time_total = problem_description["time_total"]
# termophysical properties
# -------------------------
base_array_space = np.zeros_like(self.space_mesh)
conductivity_0 = base_array_space + problem_description[
"conductivity_coeff"][0]
density_0 = base_array_space + problem_description[
"density_coeff"][0]
heat_capacity_0 = base_array_space + problem_description[
"heat_capacity_coeff"][0]
diffusivity_0 = conductivity_0/density_0/heat_capacity_0
self.dx = self.space_mesh[1] - self.space_mesh[0]
self.dt = (1/6)*(self.dx**2/diffusivity_0)
self.temporal_mesh = np.arange(0, self.time_total, self.dt[0])
        # values are stored in DataFrames where column names are time stamps
        self.conductivity = pd.DataFrame(columns=self.temporal_mesh)
import pandas as pd
import numpy as np
import scipy as sp
import argparse
import os
import gc
import time
from base import *
from features import *
from datetime import datetime
from sklearn.externals import joblib
from sklearn.model_selection import cross_val_score, StratifiedKFold
basepath = os.path.expanduser('../')
SEED = 1231
np.random.seed(SEED)
#############################################################################################################
# EXPERIMENT PARAMETERS #
#############################################################################################################
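# Columns dropped before training: the raw TARGET label plus a hand-curated
# set of features, presumably found to be low-importance or noisy in earlier
# experiments.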
COLS_TO_REMOVE = ['TARGET',
"due_to_paid_3",
"instalment_dpd_num_147",
"instalment_amount_diff_num_143",
"total_cash_credit_dpd",
"due_to_paid_2",
"instalment_amount_diff_num_169",
"NONLIVINGAPARTMENTS_AVG",
"instalment_amount_diff_num_48",
"instalment_amount_diff_num_31",
"instalment_dpd_num_100",
"instalment_amount_diff_num_16",
"instalment_dpd_num_144",
"instalment_amount_diff_num_18",
"instalment_amount_diff_num_190",
"instalment_dpd_num_38",
"instalment_dpd_num_22",
"HOUR_APPR_PROCESS_START_7",
"instalment_dpd_num_191",
"instalment_amount_diff_num_170",
"instalment_amount_diff_num_69",
"instalment_dpd_num_171",
"instalment_amount_diff_num_212",
"instalment_dpd_num_175",
"instalment_dpd_num_72",
"instalment_dpd_num_97",
"instalment_amount_diff_num_192",
"instalment_amount_diff_num_26",
"instalment_amount_diff_num_160",
"instalment_dpd_num_57",
"bureau_credit_type_7.0",
"instalment_dpd_num_184",
"instalment_amount_diff_num_239",
"instalment_amount_diff_num_38",
"change_in_credit_limit_ot",
"instalment_amount_diff_num_131",
"instalment_amount_diff_num_130",
"mean_NAME_INCOME_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_146",
"instalment_amount_diff_num_198",
"instalment_amount_diff_num_39",
"instalment_amount_diff_num_6",
"instalment_dpd_num_194",
"instalment_amount_diff_num_204",
"instalment_dpd_num_51",
"due_to_paid_15",
"bureau_credit_type_14.0",
"instalment_dpd_num_168",
"instalment_dpd_num_160",
"instalment_amount_diff_num_90",
"instalment_dpd_num_78",
"HOUR_APPR_PROCESS_START_18",
"NONLIVINGAPARTMENTS_MEDI",
"instalment_amount_diff_num_33",
"instalment_amount_diff_num_178",
"instalment_dpd_num_136",
"instalment_dpd_num_17",
"instalment_amount_diff_num_89",
"prev_credit_year_4",
"instalment_amount_diff_num_105",
"instalment_dpd_num_64",
"instalment_dpd_num_21",
"NAME_GOODS_CATEGORY_19",
"instalment_amount_diff_num_194",
"instalment_dpd_num_114",
"instalment_dpd_num_134",
"instalment_dpd_num_98",
"due_to_paid_9",
"instalment_dpd_num_84",
"STATUS1.0",
"instalment_amount_diff_num_127",
"instalment_amount_diff_num_40",
"bureau_credit_type_5.0",
"prev_credit_year_5",
"instalment_dpd_num_127",
"instalment_amount_diff_num_56",
"PRODUCT_COMBINATION_9",
"instalment_amount_diff_num_155",
"instalment_amount_diff_num_219",
"due_to_paid_1",
"instalment_dpd_num_116",
"instalment_dpd_num_35",
"instalment_amount_diff_num_1",
"instalment_dpd_num_154",
"instalment_amount_diff_num_50",
"instalment_amount_diff_num_211",
"prev_credit_year_10",
"instalment_dpd_num_67",
"instalment_dpd_num_174",
"mean_OCCUPATION_TYPE_AMT_CREDIT",
"bbal_2",
"instalment_dpd_num_36",
"instalment_dpd_num_81",
"instalment_dpd_num_213",
"instalment_dpd_num_71",
"instalment_dpd_num_55",
"instalment_amount_diff_num_156",
"CNT_FAM_MEMBERS",
"bureau_credit_type_13.0",
"instalment_dpd_num_125",
"instalment_dpd_num_41",
"range_min_max_credit_limit",
"instalment_amount_diff_num_3",
"instalment_amount_diff_num_96",
"instalment_dpd_num_59",
"due_to_paid_19",
"instalment_dpd_num_69",
"instalment_dpd_num_130",
"instalment_dpd_num_204",
"instalment_amount_diff_num_177",
"instalment_dpd_num_135",
"NAME_GOODS_CATEGORY_2",
"instalment_amount_diff_num_150",
"instalment_dpd_num_143",
"instalment_amount_diff_num_122",
"instalment_dpd_num_122",
"instalment_dpd_num_117",
"instalment_dpd_num_146",
"instalment_amount_diff_num_55",
"due_to_paid_17",
"instalment_amount_diff_num_30",
"instalment_amount_diff_num_136",
"instalment_amount_diff_num_180",
"instalment_amount_diff_num_162",
"instalment_dpd_num_170",
"instalment_amount_diff_num_71",
"instalment_amount_diff_num_42",
"due_to_paid_4",
"mean_NAME_INCOME_TYPE_OCCUPATION_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_23",
"PRODUCT_COMBINATION_8",
"instalment_dpd_num_159",
"instalment_amount_diff_num_118",
"instalment_amount_diff_num_78",
"instalment_dpd_num_227",
"instalment_amount_diff_num_187",
"instalment_dpd_num_214",
"instalment_amount_diff_num_145",
"instalment_dpd_num_158",
"instalment_dpd_num_203",
"instalment_amount_diff_num_161",
"instalment_amount_diff_num_21",
"NUM_NULLS_EXT_SCORES",
"instalment_dpd_num_65",
"NAME_GOODS_CATEGORY_5",
"prev_credit_year_3",
"instalment_amount_diff_num_191",
"mean_cb_credit_annuity",
"instalment_amount_diff_num_17",
"instalment_dpd_num_63",
"instalment_amount_diff_num_129",
"instalment_amount_diff_num_148",
"instalment_amount_diff_num_27",
"instalment_dpd_num_121",
"HOUSETYPE_MODE",
"instalment_dpd_num_195",
"instalment_amount_diff_num_68",
"instalment_dpd_num_186",
"instalment_amount_diff_num_245",
"instalment_dpd_num_148",
"instalment_amount_diff_num_41",
"instalment_dpd_num_66",
"num_high_int_no_info_loans",
"mean_NAME_EDUCATION_TYPE_OCCUPATION_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_128",
"bbal_4",
"instalment_dpd_num_95",
"instalment_dpd_num_155",
"instalment_dpd_num_89",
"instalment_dpd_num_132",
"instalment_amount_diff_num_28",
"instalment_dpd_num_52",
"instalment_dpd_num_40",
"instalment_dpd_num_190",
"instalment_amount_diff_num_99",
"instalment_dpd_num_92",
"instalment_dpd_num_109",
"instalment_dpd_num_115",
"instalment_dpd_num_149",
"instalment_amount_diff_num_104",
"instalment_amount_diff_num_158",
"instalment_dpd_num_180",
"instalment_dpd_num_230",
"instalment_dpd_num_208",
"instalment_amount_diff_num_222",
"instalment_amount_diff_num_199",
"bureau_credit_year_10",
"instalment_dpd_num_177",
"instalment_amount_diff_num_63",
"due_to_paid_20",
"instalment_amount_diff_num_19",
"instalment_dpd_num_61",
"instalment_amount_diff_num_32",
"instalment_dpd_num_210",
"instalment_amount_diff_num_116",
"instalment_dpd_num_140",
"mean_OCCUPATION_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_117",
"due_to_paid_13",
"NAME_INCOME_TYPE__7",
"instalment_amount_diff_num_188",
"instalment_dpd_num_198",
"instalment_amount_diff_num_34",
"instalment_amount_diff_num_262",
"instalment_dpd_num_202",
"instalment_amount_diff_num_53",
"instalment_amount_diff_num_108",
"instalment_dpd_num_56",
"instalment_amount_diff_num_214",
"FONDKAPREMONT_MODE",
"instalment_dpd_num_192",
"instalment_amount_diff_num_189",
"instalment_amount_diff_num_86",
"instalment_dpd_num_169",
"instalment_amount_diff_num_172",
"instalment_dpd_num_46",
"instalment_dpd_num_211",
"instalment_amount_diff_num_109",
"mean_NAME_FAMILY_STATUS_NAME_INCOME_TYPE_DAYS_EMPLOYED",
"instalment_amount_diff_num_175",
"instalment_amount_diff_num_168",
"MONTHS_BALANCE_median",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_AMT_INCOME_TOTAL",
"instalment_amount_diff_num_58",
"instalment_amount_diff_num_51",
"instalment_dpd_num_74",
"instalment_dpd_num_113",
"instalment_amount_diff_num_137",
"instalment_dpd_num_39",
"instalment_amount_diff_num_25",
"NAME_YIELD_GROUP_3",
"instalment_dpd_num_165",
"instalment_amount_diff_num_107",
"HOUR_APPR_PROCESS_START_16",
"prev_credit_year_11",
"CHANNEL_TYPE_6",
"instalment_amount_diff_num_88",
"instalment_amount_diff_num_64",
"instalment_amount_diff_num_201",
"ELEVATORS_AVG",
"prev_credit_year_2",
"instalment_amount_diff_num_37",
"instalment_dpd_num_54",
"instalment_amount_diff_num_153",
"instalment_amount_diff_num_203",
"instalment_dpd_num_166",
"ENTRANCES_MEDI",
"instalment_amount_diff_num_166",
"mean_NAME_INCOME_TYPE_DAYS_BIRTH",
"due_to_paid_10",
"instalment_amount_diff_num_141",
"instalment_dpd_num_96",
"instalment_dpd_num_167",
"instalment_amount_diff_num_140",
"instalment_amount_diff_num_77",
"NAME_FAMILY_STATUS",
"instalment_dpd_num_133",
"NAME_TYPE_SUITE",
"instalment_amount_diff_num_134",
"instalment_amount_diff_num_72",
"instalment_amount_diff_num_80",
"instalment_dpd_num_193",
"instalment_dpd_num_86",
"instalment_amount_diff_num_207",
"instalment_amount_diff_num_234",
"instalment_dpd_num_29",
"instalment_amount_diff_num_196",
"instalment_amount_diff_num_195",
"instalment_dpd_num_75",
"bureau_bal_pl_5",
"instalment_amount_diff_num_73",
"instalment_amount_diff_num_81",
"instalment_amount_diff_num_215",
"due_to_paid_23",
"instalment_amount_diff_num_114",
"instalment_amount_diff_num_157",
"bureau_credit_status_1.0",
"instalment_amount_diff_num_2",
"instalment_dpd_num_94",
"instalment_amount_diff_num_45",
"instalment_amount_diff_num_4",
"instalment_amount_diff_num_22",
"instalment_amount_diff_num_74",
"instalment_amount_diff_num_70",
"bureau_credit_year_11",
"instalment_dpd_num_85",
"instalment_amount_diff_num_184",
"instalment_amount_diff_num_126",
"instalment_dpd_num_14",
"instalment_amount_diff_num_62",
"instalment_amount_diff_num_121",
"instalment_amount_diff_num_15",
"instalment_dpd_num_172",
"instalment_dpd_num_142",
"mean_OCCUPATION_TYPE_DAYS_BIRTH",
"instalment_amount_diff_num_44",
"instalment_amount_diff_num_100",
"instalment_dpd_num_58",
"instalment_amount_diff_num_49",
"instalment_dpd_num_26",
"instalment_dpd_num_79",
"instalment_dpd_num_119",
"instalment_amount_diff_num_149",
"bbal_3",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_DAYS_BIRTH",
"due_to_paid_22",
"instalment_amount_diff_num_202",
"instalment_amount_diff_num_208",
"instalment_dpd_num_47",
"young_age",
"mean_CODE_GENDER_NAME_EDUCATION_TYPE_DAYS_BIRTH",
"due_to_paid_24",
"instalment_dpd_num_212",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_AMT_CREDIT",
"mean_OCCUPATION_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_44",
"instalment_amount_diff_num_182",
"due_to_paid_7",
"instalment_amount_diff_num_154",
"instalment_amount_diff_num_95",
"instalment_dpd_num_93",
"instalment_dpd_num_179",
"due_to_paid_11",
"bureau_credit_type_9.0",
"instalment_amount_diff_num_111",
"prev_credit_year_-1",
"mean_NAME_EDUCATION_TYPE_AMT_INCOME_TOTAL",
"instalment_dpd_num_189",
"instalment_amount_diff_num_256",
"instalment_dpd_num_90",
"instalment_amount_diff_num_254",
"diff_education_ext_income_mean",
"AMT_INCOME_TOTAL",
"instalment_amount_diff_num_29",
"instalment_amount_diff_num_60",
"prev_credit_year_9",
"instalment_amount_diff_num_210",
"mean_NAME_INCOME_TYPE_AMT_INCOME_TOTAL",
"instalment_amount_diff_num_176",
"instalment_amount_diff_num_98",
"instalment_amount_diff_num_47",
"instalment_amount_diff_num_173",
"HOUR_APPR_PROCESS_START_12",
"DPD_9",
"instalment_dpd_num_42",
"instalment_amount_diff_num_43",
"bureau_credit_type_11.0",
"instalment_amount_diff_num_221",
"instalment_dpd_num_138",
"instalment_amount_diff_num_128",
"instalment_dpd_num_108",
"mean_OCCUPATION_TYPE_EXT_SOURCE_2",
"instalment_dpd_num_123",
"instalment_amount_diff_num_76",
"instalment_dpd_num_24",
"instalment_dpd_num_139",
"prev_credit_year_7",
"credit_total_instalment_regular",
"due_to_paid_18",
"instalment_amount_diff_num_164",
"instalment_amount_diff_num_268",
"instalment_dpd_num_183",
"instalment_dpd_num_145",
"instalment_dpd_num_201",
"instalment_amount_diff_num_57",
"mean_NAME_INCOME_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_99",
"due_to_paid_25",
"instalment_dpd_num_137",
"instalment_dpd_num_73",
"instalment_dpd_num_68",
"instalment_amount_diff_num_183",
"instalment_dpd_num_30",
"instalment_dpd_num_70",
"instalment_dpd_num_37",
"NAME_EDUCATION_TYPE__1",
"instalment_dpd_num_151",
"bureau_credit_year_9",
"instalment_dpd_num_152",
"due_to_paid_5",
"instalment_dpd_num_207",
"child_to_non_child_ratio",
"instalment_dpd_num_87",
"bureau_credit_type_8.0",
"due_to_paid_6",
"due_to_paid_16",
"instalment_amount_diff_num_110",
"NONLIVINGAPARTMENTS_MODE",
"instalment_amount_diff_num_181",
"bureau_credit_year_0",
"instalment_amount_diff_num_91",
"instalment_amount_diff_num_152",
"bureau_bal_pl_3",
"instalment_dpd_num_45",
"instalment_amount_diff_num_54",
"instalment_dpd_num_173",
"instalment_dpd_num_120",
"instalment_dpd_num_31",
"due_to_paid_0",
"instalment_amount_diff_num_179",
"instalment_dpd_num_124",
"instalment_amount_diff_num_159",
"instalment_amount_diff_num_65",
"instalment_dpd_num_176",
"instalment_dpd_num_33",
"instalment_amount_diff_num_167",
"bureau_credit_year_8",
"instalment_dpd_num_53",
"instalment_dpd_num_164",
"EMERGENCYSTATE_MODE",
"instalment_dpd_num_188",
"instalment_amount_diff_num_79",
"instalment_dpd_num_141",
"bureau_credit_type_1.0",
"instalment_amount_diff_num_82",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_CNT_CHILDREN",
"cash_dpd_sum",
"instalment_amount_diff_num_125",
"FLAG_OWN_CAR",
"instalment_amount_diff_num_132",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_DAYS_ID_PUBLISH",
"instalment_amount_diff_num_8",
"instalment_amount_diff_num_138",
"instalment_dpd_num_80",
"instalment_amount_diff_num_106",
"instalment_amount_diff_num_135",
"bbal_5",
"mean_CODE_GENDER_NAME_EDUCATION_TYPE_AMT_CREDIT",
"instalment_dpd_num_62",
"instalment_dpd_num_126",
"due_to_paid_14",
"HOUR_APPR_PROCESS_START_11",
"mean_NAME_INCOME_TYPE_NAME_EDUCATION_TYPE_DAYS_BIRTH",
"instalment_amount_diff_num_139",
"instalment_amount_diff_num_87",
"instalment_amount_diff_num_61",
"most_recent_min_pos_cash_dpd",
"instalment_dpd_num_77",
"instalment_amount_diff_num_119",
"instalment_dpd_num_150",
"instalment_amount_diff_num_103",
"instalment_amount_diff_num_59",
"HOUR_APPR_PROCESS_START_17",
"instalment_dpd_num_82",
"mean_NAME_EDUCATION_TYPE_AMT_CREDIT",
"bureau_credit_type_2.0",
"bureau_credit_type_12.0",
"mean_NAME_EDUCATION_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_97",
"instalment_amount_diff_num_36",
"instalment_amount_diff_num_66",
"CODE_GENDER",
"instalment_dpd_num_112",
"instalment_dpd_num_34",
"HOUR_APPR_PROCESS_START_9",
"YEARS_BUILD_AVG",
"max_credit_term",
"instalment_amount_diff_num_147",
"due_to_paid_21",
"instalment_amount_diff_num_151",
"instalment_dpd_num_129",
"instalment_amount_diff_num_123",
"mean_CODE_GENDER_NAME_EDUCATION_TYPE_AMT_ANNUITY",
"instalment_dpd_num_215",
"instalment_dpd_num_218",
"instalment_amount_diff_num_94",
"instalment_dpd_num_178",
"instalment_dpd_num_118",
"instalment_dpd_num_162",
"STATUS7.0",
"prev_credit_year_8",
"HOUR_APPR_PROCESS_START_6",
"instalment_dpd_num_60",
"instalment_amount_diff_num_142",
"instalment_amount_diff_num_186",
"instalment_dpd_num_76",
"instalment_amount_diff_num_75",
"instalment_dpd_num_88",
"instalment_amount_diff_num_35",
"instalment_amount_diff_num_102",
"instalment_amount_diff_num_67",
"instalment_amount_diff_num_237",
"instalment_dpd_num_187",
"instalment_dpd_num_50",
"credit_dpd_sum",
"instalment_dpd_num_196",
"instalment_amount_diff_num_84",
"instalment_dpd_num_181",
"instalment_dpd_num_49",
"instalment_dpd_num_161",
"CNT_CHILDREN",
"instalment_dpd_num_157",
"total_credit_debt_active_to_closed",
"mean_NAME_INCOME_TYPE_NAME_EDUCATION_TYPE_DAYS_EMPLOYED",
"bureau_credit_type_6.0",
"instalment_amount_diff_num_174",
"mean_OCCUPATION_TYPE_OWN_CAR_AGE",
"instalment_amount_diff_num_133",
"instalment_amount_diff_num_144",
"instalment_dpd_num_91",
"instalment_amount_diff_num_124",
"instalment_amount_diff_num_120",
"instalment_amount_diff_num_85",
"due_to_paid_12",
"instalment_dpd_num_156",
"instalment_amount_diff_num_185",
"bureau_credit_year_-1",
"instalment_dpd_num_83",
"instalment_amount_diff_num_52",
"instalment_dpd_num_163",
"instalment_amount_diff_num_12",
"due_to_paid_8",
"instalment_dpd_num_131",
"instalment_dpd_num_32",
"FLOORSMAX_MEDI",
"NAME_EDUCATION_TYPE__4",
"instalment_amount_diff_num_93",
"instalment_dpd_num_110",
"instalment_amount_diff_num_113",
"instalment_dpd_num_185",
"instalment_amount_diff_num_163",
"instalment_amount_diff_num_92",
"instalment_amount_diff_num_264",
"instalment_amount_diff_num_112",
"children_ratio",
"instalment_amount_diff_num_165",
"ELEVATORS_MEDI",
"instalment_amount_diff_num_197",
"instalment_amount_diff_num_115",
"instalment_amount_diff_num_171",
"num_diff_credits",
"instalment_dpd_num_200",
"instalment_dpd_num_182",
"instalment_amount_diff_num_83",
"bureau_credit_type_0.0",
"instalment_amount_diff_num_13",
"FLOORSMAX_MODE",
"instalment_amount_diff_num_193",
"instalment_dpd_num_153",
"mean_NAME_FAMILY_STATUS_NAME_INCOME_TYPE_DAYS_BIRTH",
"STATUS2.0",
"mean_NAME_EDUCATION_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_111""due_to_paid_3",
"instalment_dpd_num_147",
"instalment_amount_diff_num_143",
"total_cash_credit_dpd",
"due_to_paid_2",
"instalment_amount_diff_num_169",
"NONLIVINGAPARTMENTS_AVG",
"instalment_amount_diff_num_48",
"instalment_amount_diff_num_31",
"instalment_dpd_num_100",
"instalment_amount_diff_num_16",
"instalment_dpd_num_144",
"instalment_amount_diff_num_18",
"instalment_amount_diff_num_190",
"instalment_dpd_num_38",
"instalment_dpd_num_22",
"HOUR_APPR_PROCESS_START_7",
"instalment_dpd_num_191",
"instalment_amount_diff_num_170",
"instalment_amount_diff_num_69",
"instalment_dpd_num_171",
"instalment_amount_diff_num_212",
"instalment_dpd_num_175",
"instalment_dpd_num_72",
"instalment_dpd_num_97",
"instalment_amount_diff_num_192",
"instalment_amount_diff_num_26",
"instalment_amount_diff_num_160",
"instalment_dpd_num_57",
"bureau_credit_type_7.0",
"instalment_dpd_num_184",
"instalment_amount_diff_num_239",
"instalment_amount_diff_num_38",
"change_in_credit_limit_ot",
"instalment_amount_diff_num_131",
"instalment_amount_diff_num_130",
"mean_NAME_INCOME_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_146",
"instalment_amount_diff_num_198",
"instalment_amount_diff_num_39",
"instalment_amount_diff_num_6",
"instalment_dpd_num_194",
"instalment_amount_diff_num_204",
"instalment_dpd_num_51",
"due_to_paid_15",
"bureau_credit_type_14.0",
"instalment_dpd_num_168",
"instalment_dpd_num_160",
"instalment_amount_diff_num_90",
"instalment_dpd_num_78",
"HOUR_APPR_PROCESS_START_18",
"NONLIVINGAPARTMENTS_MEDI",
"instalment_amount_diff_num_33",
"instalment_amount_diff_num_178",
"instalment_dpd_num_136",
"instalment_dpd_num_17",
"instalment_amount_diff_num_89",
"prev_credit_year_4",
"instalment_amount_diff_num_105",
"instalment_dpd_num_64",
"instalment_dpd_num_21",
"NAME_GOODS_CATEGORY_19",
"instalment_amount_diff_num_194",
"instalment_dpd_num_114",
"instalment_dpd_num_134",
"instalment_dpd_num_98",
"due_to_paid_9",
"instalment_dpd_num_84",
"STATUS1.0",
"instalment_amount_diff_num_127",
"instalment_amount_diff_num_40",
"bureau_credit_type_5.0",
"prev_credit_year_5",
"instalment_dpd_num_127",
"instalment_amount_diff_num_56",
"PRODUCT_COMBINATION_9",
"instalment_amount_diff_num_155",
"instalment_amount_diff_num_219",
"due_to_paid_1",
"instalment_dpd_num_116",
"instalment_dpd_num_35",
"instalment_amount_diff_num_1",
"instalment_dpd_num_154",
"instalment_amount_diff_num_50",
"instalment_amount_diff_num_211",
"prev_credit_year_10",
"instalment_dpd_num_67",
"instalment_dpd_num_174",
"mean_OCCUPATION_TYPE_AMT_CREDIT",
"bbal_2",
"instalment_dpd_num_36",
"instalment_dpd_num_81",
"instalment_dpd_num_213",
"instalment_dpd_num_71",
"instalment_dpd_num_55",
"instalment_amount_diff_num_156",
"CNT_FAM_MEMBERS",
"bureau_credit_type_13.0",
"instalment_dpd_num_125",
"instalment_dpd_num_41",
"range_min_max_credit_limit",
"instalment_amount_diff_num_3",
"instalment_amount_diff_num_96",
"instalment_dpd_num_59",
"due_to_paid_19",
"instalment_dpd_num_69",
"instalment_dpd_num_130",
"instalment_dpd_num_204",
"instalment_amount_diff_num_177",
"instalment_dpd_num_135",
"NAME_GOODS_CATEGORY_2",
"instalment_amount_diff_num_150",
"instalment_dpd_num_143",
"instalment_amount_diff_num_122",
"instalment_dpd_num_122",
"instalment_dpd_num_117",
"instalment_dpd_num_146",
"instalment_amount_diff_num_55",
"due_to_paid_17",
"instalment_amount_diff_num_30",
"instalment_amount_diff_num_136",
"instalment_amount_diff_num_180",
"instalment_amount_diff_num_162",
"instalment_dpd_num_170",
"instalment_amount_diff_num_71",
"instalment_amount_diff_num_42",
"due_to_paid_4",
"mean_NAME_INCOME_TYPE_OCCUPATION_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_23",
"PRODUCT_COMBINATION_8",
"instalment_dpd_num_159",
"instalment_amount_diff_num_118",
"instalment_amount_diff_num_78",
"instalment_dpd_num_227",
"instalment_amount_diff_num_187",
"instalment_dpd_num_214",
"instalment_amount_diff_num_145",
"instalment_dpd_num_158",
"instalment_dpd_num_203",
"instalment_amount_diff_num_161",
"instalment_amount_diff_num_21",
"NUM_NULLS_EXT_SCORES",
"instalment_dpd_num_65",
"NAME_GOODS_CATEGORY_5",
"prev_credit_year_3",
"instalment_amount_diff_num_191",
"mean_cb_credit_annuity",
"instalment_amount_diff_num_17",
"instalment_dpd_num_63",
"instalment_amount_diff_num_129",
"instalment_amount_diff_num_148",
"instalment_amount_diff_num_27",
"instalment_dpd_num_121",
"HOUSETYPE_MODE",
"instalment_dpd_num_195",
"instalment_amount_diff_num_68",
"instalment_dpd_num_186",
"instalment_amount_diff_num_245",
"instalment_dpd_num_148",
"instalment_amount_diff_num_41",
"instalment_dpd_num_66",
"num_high_int_no_info_loans",
"mean_NAME_EDUCATION_TYPE_OCCUPATION_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_128",
"bbal_4",
"instalment_dpd_num_95",
"instalment_dpd_num_155",
"instalment_dpd_num_89",
"instalment_dpd_num_132",
"instalment_amount_diff_num_28",
"instalment_dpd_num_52",
"instalment_dpd_num_40",
"instalment_dpd_num_190",
"instalment_amount_diff_num_99",
"instalment_dpd_num_92",
"instalment_dpd_num_109",
"instalment_dpd_num_115",
"instalment_dpd_num_149",
"instalment_amount_diff_num_104",
"instalment_amount_diff_num_158",
"instalment_dpd_num_180",
"instalment_dpd_num_230",
"instalment_dpd_num_208",
"instalment_amount_diff_num_222",
"instalment_amount_diff_num_199",
"bureau_credit_year_10",
"instalment_dpd_num_177",
"instalment_amount_diff_num_63",
"due_to_paid_20",
"instalment_amount_diff_num_19",
"instalment_dpd_num_61",
"instalment_amount_diff_num_32",
"instalment_dpd_num_210",
"instalment_amount_diff_num_116",
"instalment_dpd_num_140",
"mean_OCCUPATION_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_117",
"due_to_paid_13",
"NAME_INCOME_TYPE__7",
"instalment_amount_diff_num_188",
"instalment_dpd_num_198",
"instalment_amount_diff_num_34",
"instalment_amount_diff_num_262",
"instalment_dpd_num_202",
"instalment_amount_diff_num_53",
"instalment_amount_diff_num_108",
"instalment_dpd_num_56",
"instalment_amount_diff_num_214",
"FONDKAPREMONT_MODE",
"instalment_dpd_num_192",
"instalment_amount_diff_num_189",
"instalment_amount_diff_num_86",
"instalment_dpd_num_169",
"instalment_amount_diff_num_172",
"instalment_dpd_num_46",
"instalment_dpd_num_211",
"instalment_amount_diff_num_109",
"mean_NAME_FAMILY_STATUS_NAME_INCOME_TYPE_DAYS_EMPLOYED",
"instalment_amount_diff_num_175",
"instalment_amount_diff_num_168",
"MONTHS_BALANCE_median",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_AMT_INCOME_TOTAL",
"instalment_amount_diff_num_58",
"instalment_amount_diff_num_51",
"instalment_dpd_num_74",
"instalment_dpd_num_113",
"instalment_amount_diff_num_137",
"instalment_dpd_num_39",
"instalment_amount_diff_num_25",
"NAME_YIELD_GROUP_3",
"instalment_dpd_num_165",
"instalment_amount_diff_num_107",
"HOUR_APPR_PROCESS_START_16",
"prev_credit_year_11",
"CHANNEL_TYPE_6",
"instalment_amount_diff_num_88",
"instalment_amount_diff_num_64",
"instalment_amount_diff_num_201",
"ELEVATORS_AVG",
"prev_credit_year_2",
"instalment_amount_diff_num_37",
"instalment_dpd_num_54",
"instalment_amount_diff_num_153",
"instalment_amount_diff_num_203",
"instalment_dpd_num_166",
"ENTRANCES_MEDI",
"instalment_amount_diff_num_166",
"mean_NAME_INCOME_TYPE_DAYS_BIRTH",
"due_to_paid_10",
"instalment_amount_diff_num_141",
"instalment_dpd_num_96",
"instalment_dpd_num_167",
"instalment_amount_diff_num_140",
"instalment_amount_diff_num_77",
"NAME_FAMILY_STATUS",
"instalment_dpd_num_133",
"NAME_TYPE_SUITE",
"instalment_amount_diff_num_134",
"instalment_amount_diff_num_72",
"instalment_amount_diff_num_80",
"instalment_dpd_num_193",
"instalment_dpd_num_86",
"instalment_amount_diff_num_207",
"instalment_amount_diff_num_234",
"instalment_dpd_num_29",
"instalment_amount_diff_num_196",
"instalment_amount_diff_num_195",
"instalment_dpd_num_75",
"bureau_bal_pl_5",
"instalment_amount_diff_num_73",
"instalment_amount_diff_num_81",
"instalment_amount_diff_num_215",
"due_to_paid_23",
"instalment_amount_diff_num_114",
"instalment_amount_diff_num_157",
"bureau_credit_status_1.0",
"instalment_amount_diff_num_2",
"instalment_dpd_num_94",
"instalment_amount_diff_num_45",
"instalment_amount_diff_num_4",
"instalment_amount_diff_num_22",
"instalment_amount_diff_num_74",
"instalment_amount_diff_num_70",
"bureau_credit_year_11",
"instalment_dpd_num_85",
"instalment_amount_diff_num_184",
"instalment_amount_diff_num_126",
"instalment_dpd_num_14",
"instalment_amount_diff_num_62",
"instalment_amount_diff_num_121",
"instalment_amount_diff_num_15",
"instalment_dpd_num_172",
"instalment_dpd_num_142",
"mean_OCCUPATION_TYPE_DAYS_BIRTH",
"instalment_amount_diff_num_44",
"instalment_amount_diff_num_100",
"instalment_dpd_num_58",
"instalment_amount_diff_num_49",
"instalment_dpd_num_26",
"instalment_dpd_num_79",
"instalment_dpd_num_119",
"instalment_amount_diff_num_149",
"bbal_3",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_DAYS_BIRTH",
"due_to_paid_22",
"instalment_amount_diff_num_202",
"instalment_amount_diff_num_208",
"instalment_dpd_num_47",
"young_age",
"mean_CODE_GENDER_NAME_EDUCATION_TYPE_DAYS_BIRTH",
"due_to_paid_24",
"instalment_dpd_num_212",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_AMT_CREDIT",
"mean_OCCUPATION_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_44",
"instalment_amount_diff_num_182",
"due_to_paid_7",
"instalment_amount_diff_num_154",
"instalment_amount_diff_num_95",
"instalment_dpd_num_93",
"instalment_dpd_num_179",
"due_to_paid_11",
"bureau_credit_type_9.0",
"instalment_amount_diff_num_111",
"prev_credit_year_-1",
"mean_NAME_EDUCATION_TYPE_AMT_INCOME_TOTAL",
"instalment_dpd_num_189",
"instalment_amount_diff_num_256",
"instalment_dpd_num_90",
"instalment_amount_diff_num_254",
"diff_education_ext_income_mean",
"AMT_INCOME_TOTAL",
"instalment_amount_diff_num_29",
"instalment_amount_diff_num_60",
"prev_credit_year_9",
"instalment_amount_diff_num_210",
"mean_NAME_INCOME_TYPE_AMT_INCOME_TOTAL",
"instalment_amount_diff_num_176",
"instalment_amount_diff_num_98",
"instalment_amount_diff_num_47",
"instalment_amount_diff_num_173",
"HOUR_APPR_PROCESS_START_12",
"DPD_9",
"instalment_dpd_num_42",
"instalment_amount_diff_num_43",
"bureau_credit_type_11.0",
"instalment_amount_diff_num_221",
"instalment_dpd_num_138",
"instalment_amount_diff_num_128",
"instalment_dpd_num_108",
"mean_OCCUPATION_TYPE_EXT_SOURCE_2",
"instalment_dpd_num_123",
"instalment_amount_diff_num_76",
"instalment_dpd_num_24",
"instalment_dpd_num_139",
"prev_credit_year_7",
"credit_total_instalment_regular",
"due_to_paid_18",
"instalment_amount_diff_num_164",
"instalment_amount_diff_num_268",
"instalment_dpd_num_183",
"instalment_dpd_num_145",
"instalment_dpd_num_201",
"instalment_amount_diff_num_57",
"mean_NAME_INCOME_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_99",
"due_to_paid_25",
"instalment_dpd_num_137",
"instalment_dpd_num_73",
"instalment_dpd_num_68",
"instalment_amount_diff_num_183",
"instalment_dpd_num_30",
"instalment_dpd_num_70",
"instalment_dpd_num_37",
"NAME_EDUCATION_TYPE__1",
"instalment_dpd_num_151",
"bureau_credit_year_9",
"instalment_dpd_num_152",
"due_to_paid_5",
"instalment_dpd_num_207",
"child_to_non_child_ratio",
"instalment_dpd_num_87",
"bureau_credit_type_8.0",
"due_to_paid_6",
"due_to_paid_16",
"instalment_amount_diff_num_110",
"NONLIVINGAPARTMENTS_MODE",
"instalment_amount_diff_num_181",
"bureau_credit_year_0",
"instalment_amount_diff_num_91",
"instalment_amount_diff_num_152",
"bureau_bal_pl_3",
"instalment_dpd_num_45",
"instalment_amount_diff_num_54",
"instalment_dpd_num_173",
"instalment_dpd_num_120",
"instalment_dpd_num_31",
"due_to_paid_0",
"instalment_amount_diff_num_179",
"instalment_dpd_num_124",
"instalment_amount_diff_num_159",
"instalment_amount_diff_num_65",
"instalment_dpd_num_176",
"instalment_dpd_num_33",
"instalment_amount_diff_num_167",
"bureau_credit_year_8",
"instalment_dpd_num_53",
"instalment_dpd_num_164",
"EMERGENCYSTATE_MODE",
"instalment_dpd_num_188",
"instalment_amount_diff_num_79",
"instalment_dpd_num_141",
"bureau_credit_type_1.0",
"instalment_amount_diff_num_82",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_CNT_CHILDREN",
"cash_dpd_sum",
"instalment_amount_diff_num_125",
"FLAG_OWN_CAR",
"instalment_amount_diff_num_132",
"mean_CODE_GENDER_REG_CITY_NOT_WORK_CITY_DAYS_ID_PUBLISH",
"instalment_amount_diff_num_8",
"instalment_amount_diff_num_138",
"instalment_dpd_num_80",
"instalment_amount_diff_num_106",
"instalment_amount_diff_num_135",
"bbal_5",
"mean_CODE_GENDER_NAME_EDUCATION_TYPE_AMT_CREDIT",
"instalment_dpd_num_62",
"instalment_dpd_num_126",
"due_to_paid_14",
"HOUR_APPR_PROCESS_START_11",
"mean_NAME_INCOME_TYPE_NAME_EDUCATION_TYPE_DAYS_BIRTH",
"instalment_amount_diff_num_139",
"instalment_amount_diff_num_87",
"instalment_amount_diff_num_61",
"most_recent_min_pos_cash_dpd",
"instalment_dpd_num_77",
"instalment_amount_diff_num_119",
"instalment_dpd_num_150",
"instalment_amount_diff_num_103",
"instalment_amount_diff_num_59",
"HOUR_APPR_PROCESS_START_17",
"instalment_dpd_num_82",
"mean_NAME_EDUCATION_TYPE_AMT_CREDIT",
"bureau_credit_type_2.0",
"bureau_credit_type_12.0",
"mean_NAME_EDUCATION_TYPE_AMT_ANNUITY",
"instalment_amount_diff_num_97",
"instalment_amount_diff_num_36",
"instalment_amount_diff_num_66",
"CODE_GENDER",
"instalment_dpd_num_112",
"instalment_dpd_num_34",
"HOUR_APPR_PROCESS_START_9",
"YEARS_BUILD_AVG",
"max_credit_term",
"instalment_amount_diff_num_147",
"due_to_paid_21",
"instalment_amount_diff_num_151",
"instalment_dpd_num_129",
"instalment_amount_diff_num_123",
"mean_CODE_GENDER_NAME_EDUCATION_TYPE_AMT_ANNUITY",
"instalment_dpd_num_215",
"instalment_dpd_num_218",
"instalment_amount_diff_num_94",
"instalment_dpd_num_178",
"instalment_dpd_num_118",
"instalment_dpd_num_162",
"STATUS7.0",
"prev_credit_year_8",
"HOUR_APPR_PROCESS_START_6",
"instalment_dpd_num_60",
"instalment_amount_diff_num_142",
"instalment_amount_diff_num_186",
"instalment_dpd_num_76",
"instalment_amount_diff_num_75",
"instalment_dpd_num_88",
"instalment_amount_diff_num_35",
"instalment_amount_diff_num_102",
"instalment_amount_diff_num_67",
"instalment_amount_diff_num_237",
"instalment_dpd_num_187",
"instalment_dpd_num_50",
"credit_dpd_sum",
"instalment_dpd_num_196",
"instalment_amount_diff_num_84",
"instalment_dpd_num_181",
"instalment_dpd_num_49",
"instalment_dpd_num_161",
"CNT_CHILDREN",
"instalment_dpd_num_157",
"total_credit_debt_active_to_closed",
"mean_NAME_INCOME_TYPE_NAME_EDUCATION_TYPE_DAYS_EMPLOYED",
"bureau_credit_type_6.0",
"instalment_amount_diff_num_174",
"mean_OCCUPATION_TYPE_OWN_CAR_AGE",
"instalment_amount_diff_num_133",
"instalment_amount_diff_num_144",
"instalment_dpd_num_91",
"instalment_amount_diff_num_124",
"instalment_amount_diff_num_120",
"instalment_amount_diff_num_85",
"due_to_paid_12",
"instalment_dpd_num_156",
"instalment_amount_diff_num_185",
"bureau_credit_year_-1",
"instalment_dpd_num_83",
"instalment_amount_diff_num_52",
"instalment_dpd_num_163",
"instalment_amount_diff_num_12",
"due_to_paid_8",
"instalment_dpd_num_131",
"instalment_dpd_num_32",
"FLOORSMAX_MEDI",
"NAME_EDUCATION_TYPE__4",
"instalment_amount_diff_num_93",
"instalment_dpd_num_110",
"instalment_amount_diff_num_113",
"instalment_dpd_num_185",
"instalment_amount_diff_num_163",
"instalment_amount_diff_num_92",
"instalment_amount_diff_num_264",
"instalment_amount_diff_num_112",
"children_ratio",
"instalment_amount_diff_num_165",
"ELEVATORS_MEDI",
"instalment_amount_diff_num_197",
"instalment_amount_diff_num_115",
"instalment_amount_diff_num_171",
"num_diff_credits",
"instalment_dpd_num_200",
"instalment_dpd_num_182",
"instalment_amount_diff_num_83",
"bureau_credit_type_0.0",
"instalment_amount_diff_num_13",
"FLOORSMAX_MODE",
"instalment_amount_diff_num_193",
"instalment_dpd_num_153",
"mean_NAME_FAMILY_STATUS_NAME_INCOME_TYPE_DAYS_BIRTH",
"STATUS2.0",
"mean_NAME_EDUCATION_TYPE_DAYS_EMPLOYED",
"instalment_dpd_num_111"
]
PARAMS = {
'num_boost_round': 20000,
'early_stopping_rounds': 200,
'objective': 'binary',
'boosting_type': 'gbdt',
'learning_rate': .01,
'metric': 'auc',
'num_leaves': 20,
'sub_feature': 0.05,
'bagging_fraction': 0.9,
'reg_lambda': 75,
'reg_alpha': 5,
'min_split_gain': .5,
'min_data_in_leaf': 15,
'min_sum_hessian_in_leaf': 1,
'nthread': 16,
'verbose': -1,
'seed': SEED
}
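# A minimal usage sketch, not part of the original pipeline (`X_train`, `y_train`, `X_val`,
# `y_val` are hypothetical): `num_boost_round` and `early_stopping_rounds` are arguments of
# `lgb.train`, while the remaining keys form the booster parameter dict.
#
#   import lightgbm as lgb
#   train_kwargs = {k: PARAMS[k] for k in ('num_boost_round', 'early_stopping_rounds')}
#   booster_params = {k: v for k, v in PARAMS.items() if k not in train_kwargs}
#   booster = lgb.train(booster_params, lgb.Dataset(X_train, y_train),
#                       valid_sets=[lgb.Dataset(X_val, y_val)], **train_kwargs)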
PCA_PARAMS = {
'n_components': 10,
'whiten': True,
'random_state': SEED
}
MODEL_FILENAME = 'v145'
SAMPLE_SIZE = .3
# NOTE: a column listed in the frequency-encoded columns
# cannot also appear in the OHE cols.
FREQ_ENCODING_COLS = ['ORGANIZATION_OCCUPATION',
'age_emp_categorical',
'age_occupation'
]
OHE_COLS = [
'ORGANIZATION_TYPE',
'OCCUPATION_TYPE',
'NAME_EDUCATION_TYPE',
'NAME_HOUSING_TYPE',
'NAME_INCOME_TYPE'
]
TARGET_ENCODING_COLS = []
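# For reference, frequency encoding (a sketch assuming a pandas DataFrame `df`) replaces each
# category by its relative frequency:
#   freq = df['ORGANIZATION_OCCUPATION'].value_counts(normalize=True)
#   df['ORGANIZATION_OCCUPATION_freq'] = df['ORGANIZATION_OCCUPATION'].map(freq)
# One-hot encoding the same column would duplicate that information, hence the NOTE above that
# a column may appear in FREQ_ENCODING_COLS or OHE_COLS, but not in both.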
class Modelv145(BaseModel):
def __init__(self, **params):
self.params = params
self.n_train = 307511 # TODO: find a way to remove this constant
def load_data(self, filenames):
dfs = []
for filename in filenames:
dfs.append(pd.read_csv(filename, parse_dates=True, keep_date_col=True))
df = pd.concat(dfs)
df.index = np.arange(len(df))
df = super(Modelv145, self).reduce_mem_usage(df)
return df
def reduce_mem_usage(self, df):
return super(Modelv145, self).reduce_mem_usage(df)
def preprocess(self):
tr = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_train.pkl'))
te = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_test.pkl'))
ntrain = len(tr)
data = pd.concat((tr, te))
import numpy as np
import pandas as pd
from woodwork.logical_types import (
URL,
Age,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Timedelta
)
from woodwork.statistics_utils import (
_get_describe_dict,
_get_mode,
_make_categorical_for_mutual_info,
_replace_nans_for_mutual_info
)
from woodwork.tests.testing_utils import mi_between_cols, to_pandas
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def test_get_mode():
series_list = [
pd.Series([1, 2, 3, 4, 2, 2, 3]),
pd.Series(['a', 'b', 'b', 'c', 'b']),
pd.Series([3, 2, 3, 2]),
pd.Series([np.nan, np.nan, np.nan]),
pd.Series([pd.NA, pd.NA, pd.NA]),
pd.Series([1, 2, np.nan, 2, np.nan, 3, 2]),
pd.Series([1, 2, pd.NA, 2, pd.NA, 3, 2])
]
answer_list = [2, 'b', 2, None, None, 2, 2]
for series, answer in zip(series_list, answer_list):
mode = _get_mode(series)
if answer is None:
assert mode is None
else:
assert mode == answer
def test_accessor_replace_nans_for_mutual_info():
df_nans = pd.DataFrame({
'ints': pd.Series([2, pd.NA, 5, 2], dtype='Int64'),
'floats': pd.Series([3.3, None, 2.3, 1.3]),
'bools': pd.Series([True, None, True, False]),
'bools_pdna': pd.Series([True, pd.NA, True, False], dtype='boolean'),
'int_to_cat_nan': pd.Series([1, np.nan, 3, 1], dtype='category'),
'str': pd.Series(['test', np.nan, 'test2', 'test']),
'str_no_nan': pd.Series(['test', 'test2', 'test2', 'test']),
'dates': pd.Series(['2020-01-01', None, '2020-01-02', '2020-01-03'])
})
df_nans.ww.init()
formatted_df = _replace_nans_for_mutual_info(df_nans.ww.schema, df_nans.copy())
assert isinstance(formatted_df, pd.DataFrame)
assert formatted_df['ints'].equals(pd.Series([2, 3, 5, 2], dtype='Int64'))
assert formatted_df['floats'].equals(pd.Series([3.3, 2.3, 2.3, 1.3], dtype='float'))
assert formatted_df['bools'].equals(pd.Series([True, True, True, False], dtype='category'))
assert formatted_df['bools_pdna'].equals(pd.Series([True, True, True, False], dtype='boolean'))
assert formatted_df['int_to_cat_nan'].equals(pd.Series([1, 1, 3, 1], dtype='category'))
assert formatted_df['str'].equals(pd.Series(['test', 'test', 'test2', 'test'], dtype='category'))
assert formatted_df['str_no_nan'].equals(pd.Series(['test', 'test2', 'test2', 'test'], dtype='category'))
assert formatted_df['dates'].equals(pd.Series(['2020-01-01', '2020-01-02', '2020-01-02', '2020-01-03'], dtype='datetime64[ns]'))
def test_accessor_make_categorical_for_mutual_info():
df = pd.DataFrame({
'ints1': pd.Series([1, 2, 3, 2]),
'ints2': pd.Series([1, 100, 1, 100]),
'ints3': pd.Series([1, 2, 3, 2], dtype='Int64'),
'bools': pd.Series([True, False, True, False]),
'booleans': pd.Series([True, False, True, False], dtype='boolean'),
'categories': pd.Series(['test', 'test2', 'test2', 'test']),
'dates': pd.Series(['2020-01-01', '2019-01-02', '2020-08-03', '1997-01-04'])
})
df.ww.init()
formatted_num_bins_df = _make_categorical_for_mutual_info(df.ww.schema, df.copy(), num_bins=4)
assert isinstance(formatted_num_bins_df, pd.DataFrame)
assert formatted_num_bins_df['ints1'].equals(pd.Series([0, 1, 3, 1], dtype='int8'))
assert formatted_num_bins_df['ints2'].equals(pd.Series([0, 1, 0, 1], dtype='int8'))
assert formatted_num_bins_df['ints3'].equals(pd.Series([0, 1, 3, 1], dtype='int8'))
assert formatted_num_bins_df['bools'].equals(pd.Series([1, 0, 1, 0], dtype='int8'))
assert formatted_num_bins_df['booleans'].equals(pd.Series([1, 0, 1, 0], dtype='int8'))
assert formatted_num_bins_df['categories'].equals(pd.Series([0, 1, 1, 0], dtype='int8'))
assert formatted_num_bins_df['dates'].equals(pd.Series([2, 1, 3, 0], dtype='int8'))
def test_mutual_info_same(df_same_mi):
df_same_mi.ww.init()
mi = df_same_mi.ww.mutual_information()
cols_used = set(np.unique(mi[['column_1', 'column_2']].values))
assert 'nans' not in cols_used
assert 'nat_lang' not in cols_used
assert mi.shape[0] == 1
assert mi_between_cols('floats', 'ints', mi) == 1.0
def test_mutual_info(df_mi):
df_mi.ww.init(logical_types={'dates': Datetime(datetime_format='%Y-%m-%d')})
original_df = df_mi.copy()
mi = df_mi.ww.mutual_information()
assert mi.shape[0] == 10
np.testing.assert_almost_equal(mi_between_cols('ints', 'bools', mi), 1.0, 3)
np.testing.assert_almost_equal(mi_between_cols('ints', 'strs', mi), 0.0, 3)
np.testing.assert_almost_equal(mi_between_cols('strs', 'bools', mi), 0, 3)
np.testing.assert_almost_equal(mi_between_cols('dates', 'ints', mi), 0.274, 3)
np.testing.assert_almost_equal(mi_between_cols('dates', 'bools', mi), 0.274, 3)
mi_many_rows = df_mi.ww.mutual_information(nrows=100000)
pd.testing.assert_frame_equal(mi, mi_many_rows)
mi = df_mi.ww.mutual_information(nrows=1)
assert mi.shape[0] == 10
assert (mi['mutual_info'] == 1.0).all()
mi = df_mi.ww.mutual_information(num_bins=2)
assert mi.shape[0] == 10
np.testing.assert_almost_equal(mi_between_cols('bools', 'ints', mi), 0.0, 3)
np.testing.assert_almost_equal(mi_between_cols('strs', 'ints', mi), 1.0, 3)
np.testing.assert_almost_equal(mi_between_cols('bools', 'strs', mi), 0, 3)
np.testing.assert_almost_equal(mi_between_cols('dates', 'strs', mi), 1.0, 3)
np.testing.assert_almost_equal(mi_between_cols('dates', 'ints', mi), 1.0, 3)
# Confirm that none of this changed the underlying df
pd.testing.assert_frame_equal(to_pandas(df_mi), to_pandas(original_df))
def test_mutual_info_on_index(sample_df):
sample_df.ww.init(index='id')
mi = sample_df.ww.mutual_information()
assert not ('id' in mi['column_1'].values or 'id' in mi['column_2'].values)
mi = sample_df.ww.mutual_information(include_index=True)
assert 'id' in mi['column_1'].values or 'id' in mi['column_2'].values
def test_mutual_info_returns_empty_df_properly(sample_df):
schema_df = sample_df[['id', 'age']]
schema_df.ww.init(index='id')
mi = schema_df.ww.mutual_information()
assert mi.empty
def test_mutual_info_sort(df_mi):
df_mi.ww.init()
mi = df_mi.ww.mutual_information()
for i in range(len(mi['mutual_info']) - 1):
assert mi['mutual_info'].iloc[i] >= mi['mutual_info'].iloc[i + 1]
def test_mutual_info_dict(df_mi):
df_mi.ww.init()
mi_dict = df_mi.ww.mutual_information_dict()
mi = df_mi.ww.mutual_information()
pd.testing.assert_frame_equal(pd.DataFrame(mi_dict), mi)
def test_mutual_info_unique_cols(df_mi_unique):
df_mi_unique.ww.init()
mi = df_mi_unique.ww.mutual_information()
cols_used = set(np.unique(mi[['column_1', 'column_2']].values))
assert 'unique' in cols_used
assert 'unique_with_one_nan' in cols_used
assert 'unique_with_nans' in cols_used
assert 'ints' in cols_used
def test_get_describe_dict(describe_df):
describe_df.ww.init(index='index_col')
stats_dict = _get_describe_dict(describe_df)
index_order = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
stats_dict_to_df = pd.DataFrame(stats_dict).reindex(index_order)
stats_df = describe_df.ww.describe()
pd.testing.assert_frame_equal(stats_df, stats_dict_to_df)
def test_describe_does_not_include_index(describe_df):
describe_df.ww.init(index='index_col')
stats_df = describe_df.ww.describe()
assert 'index_col' not in stats_df.columns
def test_describe_accessor_method(describe_df):
categorical_ltypes = [Categorical,
CountryCode,
Ordinal(order=('yellow', 'red', 'blue')),
PostalCode,
SubRegionCode]
boolean_ltypes = [BooleanNullable]
non_nullable_boolean_ltypes = [Boolean]
datetime_ltypes = [Datetime]
formatted_datetime_ltypes = [Datetime(datetime_format='%Y~%m~%d')]
timedelta_ltypes = [Timedelta]
nullable_numeric_ltypes = [Double, IntegerNullable, AgeNullable]
non_nullable_numeric_ltypes = [Integer, Age]
natural_language_ltypes = [EmailAddress, Filepath, PersonFullName, IPAddress,
PhoneNumber, URL]
latlong_ltypes = [LatLong]
expected_index = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
# Test categorical columns
category_data = describe_df[['category_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'string'
else:
expected_dtype = 'category'
for ltype in categorical_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'category', 'custom_tag'},
'count': 7,
'nunique': 3,
'nan_count': 1,
'mode': 'red'}, name='category_col')
category_data.ww.init(logical_types={'category_col': ltype}, semantic_tags={'category_col': 'custom_tag'})
stats_df = category_data.ww.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'category_col'}
assert stats_df.index.tolist() == expected_index
assert expected_vals.equals(stats_df['category_col'].dropna())
# Test nullable boolean columns
boolean_data = describe_df[['boolean_col']]
for ltype in boolean_ltypes:
expected_dtype = ltype.primary_dtype
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nan_count': 1,
'mode': True,
'num_true': 4,
'num_false': 3}, name='boolean_col')
boolean_data.ww.init(logical_types={'boolean_col': ltype}, semantic_tags={'boolean_col': 'custom_tag'})
stats_df = boolean_data.ww.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'boolean_col'}
assert stats_df.index.tolist() == expected_index
assert expected_vals.equals(stats_df['boolean_col'].dropna())
# Test non-nullable boolean columns
boolean_data = describe_df[['boolean_col']].fillna(True)
for ltype in non_nullable_boolean_ltypes:
expected_dtype = ltype.primary_dtype
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 8,
'nan_count': 0,
'mode': True,
'num_true': 5,
'num_false': 3}, name='boolean_col')
boolean_data.ww.init(logical_types={'boolean_col': ltype}, semantic_tags={'boolean_col': 'custom_tag'})
stats_df = boolean_data.ww.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'boolean_col'}
assert stats_df.index.tolist() == expected_index
assert expected_vals.equals(stats_df['boolean_col'].dropna())
# Test datetime columns
datetime_data = describe_df[['datetime_col']]
for ltype in datetime_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.primary_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': pd.Timestamp('2020-01-19 09:25:42.857142784')
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
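# Worked example (sketch): with Nc_old = 10, Nc_new = 4, l = 5 and t_start = 2020-03-15,
# ramp_fun returns 10.0 on 2020-03-15, 8.8 one day later and 4.0 on 2020-03-20, i.e. a linear
# interpolation from the old to the new contact matrix over l days; delayed_ramp_fun does the
# same but only starts interpolating tau_days after t_start.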
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)[:, np.newaxis]
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
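# Usage sketch (assumes the interim mobility CSVs are present under data/interim/mobility):
#   all_mob, avg_mob = load_all_mobility_data('prov', dtype='fractional')
#   P = all_mob['place'][pd.Timestamp('2020-08-01')]  # 11x11 origin-destination matrix for that date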
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
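# Usage sketch (hypothetical variable names):
#   all_mob, avg_mob = load_all_mobility_data('prov')
#   mobility_update_func = make_mobility_update_function(all_mob, avg_mob)
#   P = mobility_update_func(pd.Timestamp('2020-04-01'))  # matrix for that date, avg_mob as fallback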
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
The current implementation covers the alpha through delta strains.
If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variants is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the alpha-gamma variants
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
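# The logistic curves above implement f(t) = 1 / (1 + exp(-k * (t - t_sig))): for the delta
# variant (k = 0.11, t_sig = 2021-06-25) the modelled fraction is 0.5 on 2021-06-25 and roughly
# 1 / (1 + exp(-1.1)) ≈ 0.75 ten days later (sketch arithmetic, not a fitted output).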
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first-dose data by Sciensano are used. For dates beyond the available data, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
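# Example (sketch): if the raw data reports 100 doses for the age group [60, 70) while the model
# uses bins [55, 65) and [65, 75), the 100 doses are first spread over single years of age
# proportionally to the demographics, after which ages 60-64 are summed into [55, 65) and
# ages 65-69 into [65, 75).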
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for non-spatial multi-vaccindation dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionnary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to what extent schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
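# Illustrative numbers (assumed, not taken from the calibration): with Nc_old = 10, Nc_new = 20,
# l = 5 and t - t_start = 2 days, ramp_fun returns 10 + (20 - 10)/5 * 2 = 14, i.e. a linear
# interpolation that reaches the new policy matrix l days after t_start. delayed_ramp_fun is the
# same ramp, shifted so that the interpolation only starts tau_days after the policy change.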
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependant social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
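# Sketch of how this method is typically wired up (assumed usage, not defined in this file):
# the bound method is passed to the model as a time-dependent parameter function for 'Nc', e.g.
#   contact_function = make_contact_matrix_function(df_google, Nc_all)
#   model = COVID19_SEIRD(initial_states, params,
#                         time_dependent_parameters={'Nc': contact_function.policies_all})
# so that policies_all is evaluated at every simulation timestep t.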
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependant social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
t27 = pd.Timestamp('2021-12-26') # Start of Christmas break for general population
t28 = pd.Timestamp('2022-01-06') # End of Christmas break
t29 = pd.Timestamp('2022-01-28') # End of measures
t30 = pd.Timestamp('2022-02-28') # Start of Spring Break
t31 = pd.Timestamp('2022-03-06') # End of Spring Break
t32 = pd.Timestamp('2022-04-04') # Start of Easter Break
import pandas as pd
import numpy as np
import warnings
from dateutil.parser import parse
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = 'Times New Roman'
import seaborn as sns
sns.set_style('whitegrid')
### 1. Data cleaning
option_contract = pd.read_excel('option_contract.xlsx')
#### Load the option contract data,
# drop the Huatai-PineBridge (华泰柏瑞) contracts and the redundant columns: 'kind', 'name', 'exercise_type'
## Drop the Huatai-PineBridge contracts
list_name = list(option_contract.name)
del_rows = [i for i in range(len(list_name)) if '华泰柏瑞' in list_name[i]]
option_contract_2 = option_contract.drop(del_rows)
## Drop the redundant columns: 'kind', 'name', 'exercise_type'
option_contract_3 = option_contract_2.drop(['kind', 'name', 'exercise_type'] \
, axis=1)
#### Add a column 'ttm' holding the remaining time to maturity in days,
# and keep only contracts with more than 30 days to maturity
## Add the 'ttm' column
option_contract_3['ttm'] = pd.Series(pd.to_datetime(option_contract_3['maturity_date']) \
- pd.to_datetime(option_contract_3['list_date']))
## Express it in days
option_contract_3['ttm'] = option_contract_3['ttm']. \
map(lambda x: x.days)
## Keep only contracts with more than 30 days to maturity
df = option_contract_3.drop(option_contract_3[option_contract_3.ttm <= 30].index)
#### Drop contracts expiring after 2019,
# and store all remaining maturity_date values in a new container
## New DataFrame holding all contracts that expire before 2020
df_2 = df.drop(df[df.maturity_date >= '2020-1-1'].index)
## Store all remaining maturity_date values in the index maturity_date_cleaned
maturity_date_cleaned = df_2.maturity_date.value_counts().sort_index().index
#### Build a list `options`: each element holds all option contracts sharing one maturity date
options = [df_2[df_2.maturity_date == i] for i in maturity_date_cleaned]
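# Quick illustrative check (not part of the strategy): every element of `options` should only
# contain contracts that share a single maturity_date.
contracts_per_expiry = [len(x) for x in options]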
#### Read the price_start and price_end data
# price_start holds the closing prices of all options on the first trading day of each month
# price_end holds the closing prices of all options on each monthly expiry day
price_start = pd.read_excel('price_start.xlsx')
price_end = pd.read_excel('price_end.xlsx')
## Get the exact date of the first trading day of each month
start_date = price_start.trade_date.value_counts().sort_index().index
# Convert the int-coded dates into real datetime objects
price_start['Date_True'] = pd.Series([parse(str(y)) for y in list(price_start.trade_date)])
## Get the exact monthly expiry dates
end_date = price_end.trade_date.value_counts().sort_index().index
# Convert the int-coded dates into real datetime objects
ls = pd.Series([parse(str(y)) for y in list(price_end.trade_date)])
price_end['Date_True'] = ls
#### Collect the underlying's closing price for every date in price_start and price_end,
# stored in Excel files and read back in
ETF_start = pd.read_excel('50ETF_Start.xlsx')
ETF_end = pd.read_excel('50ETF_End.xlsx')
### 2. Build the option hedging strategy
#### Find the contracts to sell and to buy on the first trading day of month 1, and their prices
i = 0
# Contracts to buy on the first trading day
opt_buy = options[i + 1]
# Contracts to sell on the first trading day
opt_sell = options[i]
# Option price data on the first trading day
price_data = price_start[price_start['Date_True'] == (list(ETF_start['Date'])[i])]
## Prices of the contracts to buy on the first trading day
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
## Prices of the contracts to sell on the first trading day
opt_sell = pd.merge(opt_sell, price_data, on='ts_code', how='inner')
#### Find the next-month contracts' prices on month 1's expiry day and compute the payoff of the expiring contracts
# Option prices on the expiry day of the month
price_data = price_end[price_end['Date_True'] == (list(ETF_end['Date'])[i])]
# Insert the underlying's price on the expiry day into DataFrame opt_sell
opt_sell['50ETF_price'] = list(ETF_end['close'])[i]
# Merge opt_buy with price_data
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
# Payoff at expiry of the options sold on the first trading day (a gain for the long holder, a loss for us as the short side)
opt_sell['payoff'] = (opt_sell['call_put'] == 'C') * np.maximum(opt_sell['50ETF_price'] - \
opt_sell['exercise_price'], 0) + (
opt_sell['call_put'] == 'P') * np.maximum( \
opt_sell['exercise_price'] - opt_sell['50ETF_price'], 0)
#### Compute the cost of opening the position on the first trading day, the proceeds of closing at expiry, and the resulting return
cost = opt_buy['close_x'].sum() + opt_sell['payoff'].sum()
payoff = opt_buy['close_y'].sum() + opt_sell['close'].sum()
returns = (payoff - cost) / cost
#### Use a for loop to compute the strategy return for each subsequent month
returns = []
for i in range(len(ETF_start) - 1):
# Contracts to buy on the first trading day of month i
opt_buy = options[i + 1]
# Contracts to sell on the first trading day of month i
opt_sell = options[i]
# Option price data on the first trading day of month i
price_data = price_start[price_start['Date_True'] == (list(ETF_start['Date'])[i])]
## Prices of the contracts to buy on the first trading day of month i
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
## Prices of the contracts to sell on the first trading day of month i
opt_sell = pd.merge(opt_sell, price_data, on='ts_code', how='inner')
#### Find the next-month contracts' prices on month i's expiry day and compute the payoff of the expiring contracts
# Option prices on the expiry day of month i
price_data = price_end[price_end['Date_True'] == (list(ETF_end['Date'])[i])]
# Insert the underlying's price on the expiry day into DataFrame opt_sell
opt_sell['50ETF_price'] = list(ETF_end['close'])[i]
# Merge opt_buy with price_data
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
# Payoff at expiry of the options sold on the first trading day of month i (a gain for the long holder, a loss for us as the short side)
opt_sell['payoff'] = (opt_sell['call_put'] == 'C') * np.maximum(opt_sell['50ETF_price'] - \
opt_sell['exercise_price'], 0) + (
opt_sell['call_put'] == 'P') * np.maximum( \
opt_sell['exercise_price'] - opt_sell['50ETF_price'], 0)
#### Compute the cost of opening on the first trading day of month i, the proceeds of closing at expiry, and the return
cost = opt_buy['close_x'].sum() + opt_sell['payoff'].sum()
payoff = opt_buy['close_y'].sum() + opt_sell['close'].sum()
returns.append((payoff - cost) / cost)
#### Plot the strategy's per-period returns and compare them with the SSE 50ETF fund's returns over the same periods
plt.figure(figsize=[15, 8])
plt.plot(maturity_date_cleaned[:-1], returns, color='royalblue', marker='o', \
markersize=9)
fund_returns = (ETF_end['close'] - ETF_start['close']) / ETF_start['close']
plt.plot(maturity_date_cleaned, fund_returns, 'r', marker='o' \
, markersize=9)
plt.legend(['Option strategy', '50ETF Fund'], fontsize=14)
plt.xlabel('Years', fontsize=16)
plt.ylabel('Return', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.grid()
plt.savefig('p1.svg')
plt.show()
#### Plot the strategy's cumulative return and compare it with the SSE 50ETF fund's cumulative return
plt.figure(figsize=[14, 7])
plt.plot(maturity_date_cleaned[:-1], np.cumprod(1 + np.array(returns)), color='royalblue', marker='o', \
markersize=9)
fund_returns = (ETF_end['close'] - ETF_start['close']) / ETF_start['close']
plt.plot(maturity_date_cleaned, np.cumprod(1 + np.array(fund_returns)), 'r', marker='o' \
, markersize=9)
plt.legend(['Option strategy', '50ETF Fund'], fontsize=16)
plt.xlabel('Years', fontsize=16)
plt.ylabel('Return', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.grid()
plt.savefig('p2.svg')
plt.show()
### 4. Robustness check
#### Plot S/K - 1 for all option contracts on each position-opening day, and compute the mean and standard deviation of its absolute value
option_spread = []
for i in range(len(ETF_start) - 1):
opt_sell = options[i]
opt_sell['50price'] = ETF_start['close'][i]
## Append S/K - 1 of all options sold in month i to the list option_spread
option_spread += (opt_sell['50price'] / opt_sell['exercise_price'] - 1).values \
.tolist()
## Append S/K - 1 of all options bought in month i to the list option_spread
opt_buy = options[i + 1]
opt_buy['50price'] = ETF_start['close'][i]
option_spread += (opt_buy['50price'] / opt_buy['exercise_price'] - 1).values \
.tolist()
import seaborn as sns
from scipy.stats import norm
plt.figure(figsize=[16, 8])
sns.distplot(option_spread, fit=norm, color='royalblue')
plt.title('Difference between spot price and strike price', fontsize=20)
plt.grid()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig('p3.svg')
plt.show()
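# The robustness check above also calls for the mean and standard deviation of |S/K - 1|;
# a minimal sketch of that summary (the numbers depend entirely on the data, none are assumed):
abs_spread = np.abs(np.array(option_spread))
print('mean |S/K - 1|:', abs_spread.mean())
print('std  |S/K - 1|:', abs_spread.std())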
# At-the-money (near-the-money) variant of the strategy: keep only contracts whose
# moneyness |S/K - 1| stays below the threshold eps.
def ATM(eps):
returns = []
for i in range(len(ETF_start) - 1):
# Contracts to buy on the first trading day of month i
opt_buy = options[i + 1]
# Contracts to sell on the first trading day of month i
opt_sell = options[i]
# Option price data on the first trading day of month i
price_data = price_start[price_start['Date_True'] == (list(ETF_start['Date'])[i])]
## Prices of the contracts to buy on the first trading day of month i
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
opt_buy['50price'] = list(ETF_start['close'])[i]
opt_buy = opt_buy[abs(opt_buy['50price'] / opt_buy['exercise_price'] - 1) < eps]
## Prices of the contracts to sell on the first trading day of month i
opt_sell = pd.merge(opt_sell, price_data, on='ts_code', how='inner')
opt_sell['50price'] = list(ETF_start['close'])[i]
opt_sell = opt_sell[abs(opt_sell['50price'] / opt_sell['exercise_price'] - 1) < eps]
#### Find the next-month contracts' prices on month i's expiry day and compute the payoff of the expiring contracts
# Option prices on the expiry day of month i
price_data = price_end[price_end['Date_True'] == (list(ETF_end['Date'])[i])]
# Insert the underlying's price on the expiry day into DataFrame opt_sell
opt_sell['50ETF_price'] = list(ETF_end['close'])[i]
# Merge opt_buy with price_data
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
# Payoff at expiry of the options sold on the first trading day of month i (a gain for the long holder, a loss for us as the short side)
opt_sell['payoff'] = (opt_sell['call_put'] == 'C') * np.maximum(opt_sell['50ETF_price'] - opt_sell['exercise_price'], 0) + \
(opt_sell['call_put'] == 'P') * np.maximum(opt_sell['exercise_price'] - opt_sell['50ETF_price'], 0)
#### Cost of opening on the first trading day of month i, proceeds of closing at expiry, and the resulting return
cost = opt_buy['close_x'].sum() + opt_sell['payoff'].sum()
payoff = opt_buy['close_y'].sum() + opt_sell['close'].sum()
returns.append((payoff - cost) / cost)
return returns
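# Illustrative use of the helper (the 5% moneyness band below is an assumed choice, not taken
# from the original analysis):
returns_atm = ATM(0.05)   # strategy returns restricted to near-the-money contracts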
# Non-at-the-money variant: keep only contracts whose moneyness |S/K - 1| exceeds eps
def non_ATM(eps):
returns = []
for i in range(len(ETF_start) - 1):
# Contracts to buy on the first trading day of month i
opt_buy = options[i + 1]
# Contracts to sell on the first trading day of month i
opt_sell = options[i]
# Option price data on the first trading day of month i
price_data = price_start[price_start['Date_True'] == (list(ETF_start['Date'])[i])]
## Prices of the contracts to buy on the first trading day of month i
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
opt_buy['50price'] = list(ETF_start['close'])[i]
opt_buy = opt_buy[abs(opt_buy['50price'] / opt_buy['exercise_price'] - 1) > eps]
## Prices of the contracts to sell on the first trading day of month i
opt_sell = pd.merge(opt_sell, price_data, on='ts_code', how='inner')
opt_sell['50price'] = list(ETF_start['close'])[i]
opt_sell = opt_sell[abs(opt_sell['50price'] / opt_sell['exercise_price'] - 1) > eps]
#### Find the next-month contracts' prices on month i's expiry day and compute the payoff of the expiring contracts
# Option prices on the expiry day of month i
price_data = price_end[price_end['Date_True'] == (list(ETF_end['Date'])[i])]
# Insert the underlying's price on the expiry day into DataFrame opt_sell
opt_sell['50ETF_price'] = list(ETF_end['close'])[i]
# Merge opt_buy with price_data
opt_buy = pd.merge(opt_buy, price_data, on='ts_code', how='inner')
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
Earnings expectation (consensus forecast) factors
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
self.name = '盈利预测'
self.factor_type1 = '盈利预测'
self.factor_type2 = '盈利预测'
self.description = '个股盈利预测因子'
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
:name: Consensus forecast net profit (FY1)
:desc: Consensus forecast of net profit for the first forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
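# Assumed call pattern (illustrative, not defined in this module): each factor method filters
# tp_earning to rows published on trade_date, renames the forecast column, and merges it onto
# the running factor frame on 'security_code', so factors are accumulated by chaining calls:
#   factor_df = FactorEarningExpectation.NPFY1(tp_earning, factor_df, trade_date)
#   factor_df = FactorEarningExpectation.EPSFY1(tp_earning, factor_df, trade_date)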
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
:name: Consensus forecast net profit (FY2)
:desc: Consensus forecast of net profit for the second forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
:name: Consensus forecast EPS (FY1)
:desc: Mean consensus forecast of earnings per share for the first forward fiscal year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
:name: Consensus forecast EPS (FY2)
:desc: Mean consensus forecast of earnings per share for the second forward fiscal year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
:name: Consensus forecast operating revenue (FY1)
:desc: Mean consensus forecast of operating revenue for the first forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
:name: Consensus forecast operating revenue (FY2)
:desc: Mean consensus forecast of operating revenue for the second forward fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
:name: Consensus forecast P/E (FY1)
:desc: Mean consensus forecast price-to-earnings ratio for the first forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
:name: Consensus forecast P/E (FY2)
:desc: Mean consensus forecast price-to-earnings ratio for the second forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
:name: Consensus forecast P/B (FY1)
:desc: Mean consensus forecast price-to-book ratio for the first forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
:name: Consensus forecast P/B (FY2)
:desc: Mean consensus forecast price-to-book ratio for the second forward fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
:name: PEG ratio (FY1)
:desc: Price/earnings-to-growth ratio for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
:name: PEG ratio (FY2)
:desc: Price/earnings-to-growth ratio for the second forward fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def _change_rate(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y']) / \
earning_expect[colunm + '_y']
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
@staticmethod
def _change_value(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y'])
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
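# Worked example with assumed numbers: if net_profit_fy1 is 120.0 on trade_date and 100.0 on
# pre_trade_date for the same security_code, _change_rate yields (120 - 100) / 100 = 0.2 and
# _change_value yields 120 - 100 = 20; securities missing on pre_trade_date give NaN because
# of the left join.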
@staticmethod
def NPFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1), 1-week change rate
:desc: One-week rate of change in the consensus FY1 net profit forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1), 1-month change rate
:desc: One-month rate of change in the consensus FY1 net profit forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1), 3-month change rate
:desc: Three-month rate of change in the consensus FY1 net profit forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1), 6-month change rate
:desc: Six-month rate of change in the consensus FY1 net profit forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 1-week change
:desc: One-week change in the consensus FY1 EPS forecast
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 1-month change
:desc: One-month change in the consensus FY1 EPS forecast
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 3-month change
:desc: Three-month change in the consensus FY1 EPS forecast
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 6-month change
:desc: Six-month change in the consensus FY1 EPS forecast
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 1-week change rate
:desc: One-week rate of change in the consensus FY1 EPS forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 1-month change rate
:desc: One-month rate of change in the consensus FY1 EPS forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 3-month change rate
:desc: Three-month rate of change in the consensus FY1 EPS forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1), 6-month change rate
:desc: Six-month rate of change in the consensus FY1 EPS forecast
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1), 1-week change
:desc: One-week change in the consensus FY1 net profit forecast
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1), 1-month change
:desc: One-month change in the consensus FY1 net profit forecast
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1), 3-month change
:desc: Three-month change in the consensus FY1 net profit forecast
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
#!/usr/bin/env python
# coding: utf-8
'''
'''
import time
import pandas as pd
import datarobot as dr
from datarobot.models.modeljob import wait_for_async_model_creation
import numpy as np
import re
import os
from datarobot.errors import JobAlreadyRequested
token_id = ""
ts_setting = {"project_name":"fake_job_posting_210123","filename":"../Data/fake_job_postings.csv", \
"project_id": "60089b3d23aace3eea1810d0","model_id":"", \
"feature_list": "Informative Features","features":[],"set":"validation" , \
"AUC":"Weighted AUC", "LogLoss":"Weighted LogLoss", \
"downsampling": 36,"holdout_pct": 20,"validation_pct":16,"target":"fraudulent" }
parameter_name = ['stop_words','stemmer','num_ngram',"use_idf","pos_tagging"]
value = [1,"porter",[1,2,3,4],1,1]
param_df = pd.DataFrame(list(zip(parameter_name, value)),
columns =['parameter_name', 'value'])
dr.Client(token=token_id, endpoint='https://app.datarobot.com/api/v2')
def check_if_number(st):
tp = re.search("\d+",st)
if tp:
return int(tp.group())
else:
return np.nan
def get_min_max_salary (text):
'''
Get the min and max from the salary_range
:param text: string
:return: the min and max of a salary_range
'''
if type(text) == str:
if re.search("\-",text):
tp = text.split("-")
min_salary = check_if_number(tp[0].strip())
max_salary = check_if_number(tp[1].strip())
return min_salary,max_salary
else:
return np.nan,np.nan
else:
return np.nan, np.nan
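# Hedged sanity check with made-up inputs (not rows from the dataset):
print(get_min_max_salary('40000-60000'))   # expected (40000, 60000)
print(get_min_max_salary(None))            # expected (nan, nan)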
def cleaned_location(text):
'''
Extract country, and country_and state from location
:param text: string with country, state, city
:return:
'''
country_state = ""
st = str(text)
if type(st) is str:
tp = re.search("[a-zA-Z]{2,}\s?\,(\s*[a-zA-Z0-9]+|\s)",st)
if tp:
country_state = tp.group().strip()
country = st.strip()[0:2]
else:
return "",""
return country,country_state
else:
return "",""
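# Hedged example with a made-up location string (not a row from the dataset): the helper is
# meant to return (country, "country, state"), e.g.
print(cleaned_location('US, NY, New York'))   # expected ('US', 'US, NY')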
def create_binary_cat_for_education(text):
if pd.isna(text):  # pd.isnull is an alias of pd.isna, so one check suffices
return "no"
elif text == "unspecified":
return "no"
else:
return "yes"
def PrepareDataSet():
'''
Prepare the dataset for fake_job_postings by adding new features.
:return: enriched original dataset with new features
'''
fake_jobs_df = pd.read_csv(ts_setting["filename"])
fake_jobs_df['min_salary'] = np.nan
fake_jobs_df['max_salary'] = np.nan
fake_jobs_df['salary_diff'] = np.nan
fake_jobs_df["min_salary"],fake_jobs_df["max_salary"] = zip(*fake_jobs_df["salary_range"].apply(get_min_max_salary))
fake_jobs_df["min_salary"] = pd.to_numeric(fake_jobs_df["min_salary"])
#!/usr/bin/env python
# coding: utf-8
# # I have decided to interpret the results using Facebook's recently released Prophet for time-series forecasting
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
print()
df = pd.read_csv("../../../input/nsrose7224_crowdedness-at-the-campus-gym/data.csv")
# In[ ]:
from fbprophet import Prophet
# In[ ]:
df.head()
# In[ ]:
df.drop('timestamp', axis = 1, inplace = True)
# In[ ]:
df.head()
# In[ ]:
df['date'] = pd.to_datetime(df['date'])
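# In[ ]:
# A minimal forecasting sketch (the 'number_people' target column and the daily resampling are
# assumptions about this dataset, not steps taken from the original notebook):
daily = df.set_index('date')['number_people'].resample('D').mean().reset_index()
daily.columns = ['ds', 'y']          # Prophet expects these two column names
m = Prophet()
m.fit(daily)
future = m.make_future_dataframe(periods=30)   # extend 30 days past the last observation
forecast = m.predict(future)
m.plot(forecast)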
#%%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
import matplotlib.pyplot as plt
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
dir_code_methods = os.environ['dir_code_methods']
#%%
###############################################################################
# Dictionaries for latent variable models
###############################################################################
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_selfreport')
infile = open(filename,'rb')
dict_selfreport = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_random_ema')
infile = open(filename,'rb')
dict_random_ema = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_puffmarker')
infile = open(filename,'rb')
dict_puffmarker = pickle.load(infile)
infile.close()
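# Structure of the pickled objects as inferred from their use below (not verified against the
# pickle files themselves): each dict is keyed by participant id, then by study day, and each
# leaf dict carries arrays such as 'hours_since_start_day', 'message' and 'delta'.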
#%%
###############################################################################
# Create a data frame with records of start & end of day timestamps
# for each participant-day
###############################################################################
# output of this script is the data frame data_day_limits
exec(open(os.path.join(os.path.realpath(dir_code_methods), 'setup-day-limits.py')).read())
data_reference = data_day_limits.loc[:,['participant_id','study_day']].groupby('participant_id').count().reset_index()
data_reference = data_reference.rename(columns = {'study_day':'max_study_day'})
# SANITY CHECK
#data_reference['max_study_day'].value_counts() # this is equal to 14
#%%
###############################################################################
# Knit together various data streams
###############################################################################
all_participant_id = data_hq_episodes['id'].drop_duplicates()
all_participant_id.index = np.array(range(0,len(all_participant_id.index)))
all_dict = {}
# %%
for i in range(0, len(all_participant_id)):
current_participant = all_participant_id[i]
current_dict = {}
for j in range(1, 15):
this_study_day = j
# Lets work with selfeport first ##########################################
current_dict_selfreport = dict_selfreport[current_participant][j]
if len(current_dict_selfreport['hours_since_start_day'])==0:
tmp_selfreport = pd.DataFrame({})
else:
tmp_selfreport = pd.DataFrame({'assessment_type':'selfreport',
'hours_since_start_day': current_dict_selfreport['hours_since_start_day'],
'smoke': 'Yes',
'when_smoke': current_dict_selfreport['message'],
'delta': current_dict_selfreport['delta']
})
# Now let's work with Random EMA ##########################################
current_dict_random_ema = dict_random_ema[current_participant][j]
if len(current_dict_random_ema['hours_since_start_day'])==0:
tmp_random_ema = pd.DataFrame({})
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
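        # the callable receives each match object and its return value is
        # substituted for the match; n=2 caps the replacements per string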
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
        # case and flags cannot be combined with a compiled regex;
        # str.replace raises a ValueError in that case
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
        tm.assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
        tm.assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
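        # with expand=False, a single capture group yields a Series (or
        # Index), while multiple groups still yield a DataFrame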
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
        # a single named group returns a Series named after that group
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
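        # with expand=True the result is always a DataFrame,
        # one column per capture group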
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
            tm.assert_series_equal(result_series,
                                   Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
            'dave@google.com',
            'tdhock5@gmail.com',
            'maudelaperriere@gmail.com',
            'rob@gmail.com some text steve@gmail.com',
            'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # an Index should give the same result whether or not it has a name,
        # i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
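        # the cross-section at match == 0 keeps only the first match per
        # subject, which should line up with the extract result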
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
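        # each method should return an empty result whose dtype matches the
        # comparisons below (bool, int or object depending on the method)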
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: 3 FULLWIDTH DIGIT THREE
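        # isdecimal is stricter than isnumeric: it accepts only characters
        # that form base-10 numbers, while isnumeric also accepts fractions
        # and other numeric code points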
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
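        # get_dummies splits each string on the separator and returns one
        # indicator column per distinct token; missing values give all-zero
        # rows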
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
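        # str.index/rindex behave like find/rfind but raise ValueError
        # when the substring is not found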
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
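        # the pattern is taken literally, so '[,_]' never matches and each
        # string is returned whole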
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
        # n=0 and n=-1 should both behave like an unlimited split
        # (re.split uses 0 for "no limit", str.split uses -1)
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
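        # on an Index, expand=True returns a MultiIndex whose number of
        # levels equals the largest number of split pieces, with missing
        # pieces filled with NaN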
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
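        # partition always produces a (head, separator, tail) triple; with
        # expand=False the result is a Series of tuples, not a DataFrame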
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
from typing import List, Text, Dict
from dataclasses import dataclass
import ssl
import urllib.request
from io import BytesIO
from zipfile import ZipFile
from urllib.parse import urljoin
from logging import exception
import os
from re import findall
from datetime import datetime, timedelta
import lxml.html as LH
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
from selenium.webdriver.support.ui import WebDriverWait
import warnings
import string
import re
from bs4 import BeautifulSoup
import requests
import glob
from fake_useragent import UserAgent
import brFinance.utils as utils
import pickle
ssl._create_default_https_context = ssl._create_unverified_context
warnings.simplefilter(action='ignore', category=FutureWarning)
@dataclass
class SearchENET:
"""
Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx according to the input parameters
"""
def __init__(self, cod_cvm: int = None, category: int = None, driver: utils.webdriver = None):
self.driver = driver
# self.cod_cvm_dataframe = self.cod_cvm_list()
self.cod_cvm = cod_cvm
if cod_cvm is not None:
self.check_cod_cvm_exist(self.cod_cvm)
self.category = category
if category is not None:
self.check_category_exist(self.category)
def cod_cvm_list(self) -> pd.DataFrame:
"""
Returns a dataframe of all CVM codes and company names available at https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx
"""
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx")
#wait_pageload()
for retrie in range(50):
try:
html = str(driver.find_element_by_id('hdnEmpresas').get_attribute("value"))
listCodCVM = re.findall("(?<=\_)(.*?)(?=\')", html)
listNomeEmp = re.findall("(?<=\-)(.*?)(?=\')", html)
codigos_cvm = pd.DataFrame(list(zip(listCodCVM, listNomeEmp)),
columns=['codCVM', 'nome_empresa'])
codigos_cvm['codCVM'] = pd.to_numeric(codigos_cvm['codCVM'])
if len(codigos_cvm.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
if self.driver is None:
driver.quit()
return codigos_cvm
def check_cod_cvm_exist(self, cod_cvm) -> bool:
codigos_cvm_available = self.cod_cvm_list()
cod_cvm_exists = str(cod_cvm) in [str(cod_cvm_aux) for cod_cvm_aux in codigos_cvm_available['codCVM'].values]
if cod_cvm_exists:
return True
else:
raise ValueError('Código CVM informado não encontrado.')
def check_category_exist(self, category) -> bool:
search_categories_list = [21, 39]
if category in search_categories_list:
return True
else:
raise ValueError('Invalid category value. Available categories are:', search_categories_list)
@property
def search(self) -> pd.DataFrame:
"""
Returns dataframe of search results including cod_cvm, report's url, etc.
"""
dataInicial = '01012010'
dataFinal = datetime.today().strftime('%d%m%Y')
option_text = str(self.category)
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx?codigoCVM={str(self.cod_cvm)}")
# Wait and click cboCategorias_chosen
for errors in range(10):
try:
driver.find_element_by_id('cboCategorias_chosen').click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath(
f"//html/body/form[1]/div[3]/div/fieldset/div[5]/div[1]/div/div/ul/li[@data-option-array-index='{option_text}']").click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath("//html/body/form[1]/div[3]/div/fieldset/div[4]/div[1]/label[4]").click()
break
except:
time.sleep(1)
# Wait and send keys txtDataIni
for errors in range(10):
try:
driver.find_element_by_id('txtDataIni').send_keys(dataInicial)
break
except:
time.sleep(1)
# Wait and send keys txtDataFim
for errors in range(10):
try:
driver.find_element_by_id('txtDataFim').send_keys(dataFinal)
break
except:
time.sleep(1)
# Wait and click btnConsulta
for errors in range(10):
try:
driver.find_element_by_id('btnConsulta').click()
break
except:
time.sleep(1)
# Wait html table load the results (grdDocumentos)
for errors in range(10):
try:
table_html = pd.read_html(str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML")))[-1]
if len(table_html.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
table_html = str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML"))
table = LH.fromstring(table_html)
results = pd.read_html(table_html)
for df_result in results:
if len(df_result.index) > 0:
pattern = "OpenPopUpVer(\'(.*?)\')"
df_result['linkView'] = table.xpath('//tr/td/i[1]/@onclick')
df_result['linkDownload'] = table.xpath('//tr/td/i[2]/@onclick')
df_result['linkView'] = "https://www.rad.cvm.gov.br/ENET/" + \
df_result['linkView'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False)
df3 = df_result['linkDownload'].str.split(',', expand=True)
df3.columns = ['COD{}'.format(x+1) for x in df3.columns]
df_result = df_result.join(df3)
df_result['linkDownload'] = "https://www.rad.cvm.gov.br/ENET/frmDownloadDocumento.aspx?Tela=ext&numSequencia=" + \
df_result['COD1'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&numVersao=" + df_result['COD2'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&numProtocolo=" + df_result['COD3'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&descTipo=" + df_result['COD4'].str.extract(r"(?<=\')(.*?)(?=\')", expand=False) + \
"&CodigoInstituicao=1"
df_result = df_result[['Código CVM', 'Empresa', 'Categoria', 'Tipo', 'Espécie',
'Data Referência', 'Data Entrega', 'Status', 'V', 'Modalidade',
'linkView', 'linkDownload']]
df_result['Data Referência'] = df_result['Data Referência'].str.split(
' ', 1).str[1]
df_result['Data Referência'] = pd.to_datetime(
df_result["Data Referência"], format="%d/%m/%Y", errors="coerce")
df_result = df_result[df_result["Status"] == "Ativo"]
df_result["Código CVM"] = self.cod_cvm
df_result = df_result[['Código CVM', 'Empresa', 'Categoria', 'Tipo', 'Espécie',
'Data Referência', 'Data Entrega', 'Status', 'V', 'Modalidade',
'linkView', 'linkDownload']]
df_result = df_result.reset_index(drop=True)
break
if self.driver is None:
driver.quit()
print(f"Resultados da busca ENET: {len(df_result.index)}")
return df_result
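# A minimal usage sketch for SearchENET (the CVM code and category below are
# illustrative assumptions, not values taken from this module):
#
#   driver = utils.Browser.run_chromedriver()
#   enet = SearchENET(cod_cvm=9512, category=21, driver=driver)
#   results_df = enet.search   # DataFrame with report metadata, linkView and linkDownload
#   driver.quit()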
@dataclass
class FinancialReport:
def __init__(self, link: str, driver: utils.webdriver = None):
self.link = link
self.driver = driver
@property
def financial_reports(self) -> Dict:
"""
Returns a dictionary with the financial reports available on the given ENET document page,
one entry per statement listed in the page's report dropdown (cmbQuadro).
"""
link = self.link
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
erros = 0
max_retries = 10
dictDemonstrativos = None
while erros < max_retries:
try:
print("Coletando dados do link:", link)
driver.get(link)
# Wait page load the reports
for retrie in range(max_retries):
# Once the captcha has been passed, options_text holds the available statement options
options_text = [x.get_attribute("text") for x in driver.find_element_by_name(
"cmbQuadro").find_elements_by_tag_name("option")]
if len(options_text) > 0:
break
else:
time.sleep(1)
# Navigate through the statements and save each dataframe in the dictionary
refDate = driver.find_element_by_id('lblDataDocumento').text
versaoDoc = driver.find_element_by_id(
'lblDescricaoCategoria').text.split(" - ")[-1].replace("V", "")
report = {"ref_date": refDate,
"versao": int(versaoDoc),
"cod_cvm": int(driver.find_element_by_id('hdnCodigoCvm').get_attribute("value"))
}
dictDemonstrativos = {}
for demonstrativo in options_text:
print(demonstrativo)
driver.find_element_by_xpath("//select[@name='cmbQuadro']/option[text()='{option_text}']".format(option_text=demonstrativo)).click()
iframe = driver.find_element_by_xpath(
"//iframe[@id='iFrameFormulariosFilho']")
driver.switch_to.frame(iframe)
html = driver.page_source
if demonstrativo == "Demonstração do Fluxo de Caixa":
index_moeda = -2
else:
index_moeda = -1
moedaUnidade = driver.find_element_by_id(
'TituloTabelaSemBorda').text.split(" - ")[index_moeda].replace("(", "").replace(")", "")
if demonstrativo == "Demonstração das Mutações do Patrimônio Líquido":
df = pd.read_html(html, header=0, decimal=',')[1]
converters = {c: lambda x: str(x) for c in df.columns}
df = pd.read_html(html, header=0, decimal=',',
converters=converters)[1]
else:
df = pd.read_html(html, header=0, decimal=',')[0]
converters = {c: lambda x: str(x) for c in df.columns}
df = pd.read_html(html, header=0, decimal=',',
converters=converters)[0]
for ind, column in enumerate(df.columns):
if column.strip() != "Conta" and column.strip() != "Descrição":
df[column] = df[column].astype(
str).str.strip().str.replace(".", "")
df[column] = pd.to_numeric(df[column], errors='coerce')
"""This module is meant to contain the Solscan class"""
from messari.dataloader import DataLoader
from messari.utils import validate_input
from string import Template
from typing import Union, List, Dict
from .helpers import unpack_dataframe_of_dicts
import pandas as pd
#### Block
BLOCK_LAST_URL = 'https://public-api.solscan.io/block/last'
BLOCK_TRANSACTIONS_URL = 'https://public-api.solscan.io/block/transactions'
BLOCK_BLOCK_URL = Template('https://public-api.solscan.io/block/$block')
#### Transaction
TRANSACTION_LAST_URL = 'https://public-api.solscan.io/transaction/last'
TRANSACTION_SIGNATURE_URL = Template('https://public-api.solscan.io/transaction/$signature')
#### Account
ACCOUNT_TOKENS_URL = 'https://public-api.solscan.io/account/tokens'
ACCOUNT_TRANSACTIONS_URL = 'https://public-api.solscan.io/account/transactions'
ACCOUNT_STAKE_URL = 'https://public-api.solscan.io/account/stakeAccounts'
ACCOUNT_SPL_TXNS_URL = 'https://public-api.solscan.io/account/splTransfers'
ACCOUNT_SOL_TXNS_URL = 'https://public-api.solscan.io/account/solTransfers'
ACCOUNT_EXPORT_TXNS_URL = 'https://public-api.solscan.io/account/exportTransactions'
ACCOUNT_ACCOUNT_URL = Template('https://public-api.solscan.io/account/$account')
#### Token
TOKEN_HOLDERS_URL = 'https://public-api.solscan.io/token/holders'
TOKEN_META_URL = 'https://public-api.solscan.io/token/meta'
TOKEN_LIST_URL = 'https://public-api.solscan.io/token/list'
#### Market
MARKET_INFO_URL = Template('https://public-api.solscan.io/market/token/$tokenAddress')
#### Chain Information
CHAIN_INFO_URL = 'https://public-api.solscan.io/chaininfo'
# TODO: make this clean / not hardcoded? look into how this works
HEADERS={'accept': 'application/json', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'} # pylint: disable=line-too-long
class Solscan(DataLoader):
"""This class is a wrapper around the Solscan API
"""
def __init__(self):
DataLoader.__init__(self, api_dict=None, taxonomy_dict=None)
#################
# Block endpoints
def get_last_blocks(self, num_blocks=1) -> pd.DataFrame:
"""returns info for last blocks (default is 1, limit is 20)
Parameters
----------
num_blocks: int (default is 1)
number of blocks to return, max is 20
Returns
-------
DataFrame
DataFrame with block information
"""
# Max value is 20 or API bricks
limit=num_blocks if num_blocks < 21 else 20
params = {'limit': limit}
last_blocks = self.get_response(BLOCK_LAST_URL,
params=params,
headers=HEADERS)
last_blocks_df = pd.DataFrame(last_blocks)
last_blocks_df.set_index('currentSlot', inplace=True)
last_blocks_df = unpack_dataframe_of_dicts(last_blocks_df)
# TODO, extract data from 'result'
return last_blocks_df
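# A minimal usage sketch (illustrative only): fetch the five most recent blocks.
#
#   solscan = Solscan()
#   blocks_df = solscan.get_last_blocks(num_blocks=5)   # indexed by currentSlot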
def get_block_last_transactions(self, blocks_in: Union[str, List],
offset=0, num_transactions=10) -> pd.DataFrame:
"""get last num_transactions of given block numbers
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
num_transactions: int (default is 10)
number of transactions to return
Returns
-------
DataFrame
dataframe with transaction details
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
params = {'block': block,
'offset': offset,
'limit': num_transactions}
txns = self.get_response(BLOCK_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
txns_df = pd.DataFrame(txns)
df_list.append(txns_df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_block(self, blocks_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given block(s)
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
Returns
-------
DataFrame
DataFrame with block information
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
endpoint_url = BLOCK_BLOCK_URL.substitute(block=block)
response = self.get_response(endpoint_url,
headers=HEADERS)
df = pd.DataFrame(response)
df = df.drop('currentSlot', axis=1)  # reassign: drop without assignment is a no-op
df_list.append(df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = fin_df.xs('result', axis=1, level=1)
return fin_df
#######################
# Transaction endpoints
def get_last_transactions(self, num_transactions=10) -> pd.DataFrame:
"""Return last num_transactions transactions
Parameters
----------
num_transactions: int (default is 10)
number of transactions to return, limit is 20
Returns
-------
DataFrame
dataframe with transaction details
"""
# Max value is 20 or API bricks
limit=num_transactions if num_transactions < 21 else 20
params = {'limit': limit}
response = self.get_response(TRANSACTION_LAST_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
fin_df = unpack_dataframe_of_dicts(df)
return fin_df
def get_transaction(self, signatures_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given transaction signature(s)
Parameters
----------
signatures_in: str, List
single signature in or list of signatures in
Returns
-------
DataFrame
DataFrame with transaction details
"""
signatures = validate_input(signatures_in)
series_list = []
for signature in signatures:
endpoint_url = TRANSACTION_SIGNATURE_URL.substitute(signature=signature)
response = self.get_response(endpoint_url,
headers=HEADERS)
#print(response)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=signatures, axis=1)
return fin_df
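# A minimal usage sketch; the signature strings below are placeholders, not real ones.
# A single signature or a list is accepted, and the result columns are keyed by signature.
#
#   txn_df = Solscan().get_transaction("signatureA")
#   txns_df = Solscan().get_transaction(["signatureA", "signatureB"])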
###################
# Account endpoints
def get_account_tokens(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Return token balances of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with token balances of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TOKENS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_transactions(self, accounts_in: Union[str,List]) -> pd.DataFrame:
"""Return DataFrame of transactions of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with transactions of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_stake(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Get staking accounts of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with staking accounts of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_STAKE_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_spl_transactions(self, accounts_in: Union[str, List],
from_time: int=None,
to_time: int=None,
offset: int=0,
limit: int=10) -> pd.DataFrame:
"""Return SPL transfers for given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with SPL transfers for given account(s)
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account,
'toTime': to_time,
'fromTime': from_time,
'offset': offset,
'limit': limit}
response = self.get_response(ACCOUNT_SPL_TXNS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df = df.drop('total', axis=1)  # reassign: drop without assignment is a no-op
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_account_sol_transactions(self, accounts_in: Union[str, List],
from_time: int=None,
to_time: int=None,
offset: int=0,
limit: int=10) -> pd.DataFrame:
"""Return SOL transfers for given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with SOL transfers for given account(s)
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account,
'toTime': to_time,
'fromTime': from_time,
'offset': offset,
'limit': limit}
response = self.get_response(ACCOUNT_SOL_TXNS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_account_export_transactions(self, accounts_in: Union[str, List],
type_in: str, from_time: int, to_time: int) -> List[str]:
"""Export transactions to CSV style string
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
type_in: str
what type of transactions to export:
- tokenchange
- soltransfer
- all
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
Returns
-------
List[str]
list of strings to make csv document
"""
accounts = validate_input(accounts_in)
csv_list=[]
for account in accounts:
params={'account': account,
'type': type_in,
'fromTime': from_time,
'toTime': to_time}
# NOTE: need to do this to not return json
response = self.session.get(ACCOUNT_EXPORT_TXNS_URL, params=params, headers=HEADERS)
csv = response.content.decode('utf-8')
csv_list.append(csv)
return csv_list
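# Because this method returns raw CSV text rather than a DataFrame, a caller might
# persist each export like this (account, times and file name are illustrative):
#
#   csvs = Solscan().get_account_export_transactions("SomeAccount", "all",
#                                                    from_time=1609459200,
#                                                    to_time=1640995200)
#   with open("account_transactions.csv", "w") as f:
#       f.write(csvs[0])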
def get_account(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Return overall account(s) information, including program account,
NFT metadata information
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with account info
"""
accounts = validate_input(accounts_in)
series_list = []
for account in accounts:
endpoint_url = ACCOUNT_ACCOUNT_URL.substitute(account=account)
response = self.get_response(endpoint_url,
headers=HEADERS)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=accounts, axis=1)
return fin_df
#################
# Token endpoints
def get_token_holders(self, tokens_in: Union[str, List],
limit: int=10, offset: int=0) -> pd.DataFrame:
"""Return top token holders for given token(s)
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses, used to filter results
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with top token holders
"""
tokens = validate_input(tokens_in)
df_list = []
for token in tokens:
params={'tokenAddress': token,
'limit': limit,
'offset': offset}
response = self.get_response(TOKEN_HOLDERS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df = df.drop('total', axis=1)  # reassign: drop without assignment is a no-op
df_list.append(df)
fin_df = pd.concat(df_list, keys=tokens, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_token_meta(self, tokens_in: Union[str, List]) -> pd.DataFrame:
"""Return metadata of given token(s)
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses, used to filter results
Returns
-------
DataFrame
DataFrame with token metadata
"""
tokens = validate_input(tokens_in)
series_list = []
for token in tokens:
params={'tokenAddress': token}
response = self.get_response(TOKEN_META_URL,
params=params,
headers=HEADERS)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=tokens, axis=1)
return fin_df
def get_token_list(self, sort_by: str='market_cap', ascending: bool=True,
limit: int=10, offset: int=0) -> pd.DataFrame:
"""Returns DataFrame of tokens
Parameters
----------
sort_by: str (default 'market_cap')
how to sort results, options are:
- market_cap
- volume
- holder
- price
- price_change_24h
- price_change_7d
- price_change_14d
- price_change_30d
- price_change_60d
- price_change_200d
- price_change_1y
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
ascending: bool
return results ascending or descending (default True)
Returns
-------
DataFrame
DataFrame with tokens
"""
direction = 'asc' if ascending else 'desc'
params={'sortBy': sort_by,
'direction': direction,
'limit': limit,
'offset': offset}
response = self.get_response(TOKEN_LIST_URL,
params=params,
headers=HEADERS)
token_list_df = pd.DataFrame(response['data'])
return token_list_df
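# A minimal usage sketch: the 20 largest tokens by market cap, largest first.
#
#   top_tokens = Solscan().get_token_list(sort_by='market_cap', ascending=False, limit=20)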
##################
# Market endpoints
def get_market_info(self, tokens_in: Union[str, List]) -> pd.DataFrame:
"""Get market information of the given token
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses
Returns
-------
DataFrame
DataFrame containing market info for token(s)
"""
tokens = validate_input(tokens_in)
market_info_list = []
for token in tokens:
endpoint_url = MARKET_INFO_URL.substitute(tokenAddress=token)
market_info = self.get_response(endpoint_url,
headers=HEADERS)
market_info_series = pd.Series(market_info)
import logging
import os
import re
import pandas as pd
import numpy as np
from pandas import DataFrame
from tqdm import tqdm
from joblib import dump, load
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from ..MyMertrics import *
from pprint import pformat
from util.file_manager import remake_dir
def feature_filter(features_list):
res = []
for feature in features_list:
if len(feature) < 2:
res.append(feature)
return res
def merge_similar_feature(data, features):
column = data[features].sum(axis=1)
df = pd.DataFrame(data=column, columns=[features[0]])
data = data.drop(columns=features)
data.insert(2, features[0], df)
return data
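# A small illustration of merge_similar_feature (the column names are made up):
# the near-duplicate columns are summed into the first name and the originals dropped.
#
#   df = pd.DataFrame({'user-id': [1], 'tweet': ['x'], 'lol': [1], 'loll': [2], 'class': ['A']})
#   merge_similar_feature(df, ['lol', 'loll'])   # single 'lol' column holding 3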
def merge(f_path):
"""
:param f_path: file path
:return: results path
"""
if not os.path.exists("myData"):
remake_dir("myData")
logging.info("[*] Merging %s " % f_path)
data = pd.read_csv(f_path)
# features = feature_filter(data.columns[2:-1])
# data = data.drop(columns=features)
data_features = data.columns[2:-1]
features = [[], [], [], []]
for feature in data_features:
if 'aha' in feature:
features[0].append(feature)
if 'lmao' in feature:
features[1].append(feature)
if 'lmf' in feature or "fao" in feature:
features[2].append(feature)
if 'jus' in feature:
features[3].append(feature)
features.append(["huh", "hun"])
features.append(["taco", "tacos"])
features.append(["icheated", "icheatedbecause"])
features.append(["lt", "ltlt", "ltreply"])
features.append(["mad", "madd"])
features.append(["b", "be"])
features.append(["n", "and"])
features.append(["u", "you"])
features.append(["flex", "flexin"])
features.append(["dam", "damn", 'da'])
features.append(["kno", "know", 'knw'])
features.append(["dat", "dats"])
features.append(["gon", "gone"])
features.append(["iono", "ion"])
features.append(["factaboutme", "factsaboutme"])
features.append(["bt", "btwn"])
features.append(["loll", "lolss", "lolsz"])
features.append(["cali", "california"])
for f in features:
data = merge_similar_feature(data, f)
data['class'] = data['class'].map(MAP)
_columns = data.columns
users = set(data['user-id'])
new_df = DataFrame(columns=data.columns[2:])
id_map = []
for user_id in tqdm(users, unit=" users"):
tmp_line = data.loc[data['user-id'] == user_id]
id_map.append(tmp_line.index)
# line = tmp_line.iloc[:, 2:-1].sum(axis=0)/len(tmp_line.index)
line = tmp_line.iloc[:, 2:-1].sum(axis=0)
line['class'] = data.loc[data['user-id'] == user_id]['class'].iloc[0]
line_df = DataFrame(data=line, columns=[user_id]).T
new_df = new_df.append(line_df)
filename = os.path.basename(f_path)
# features selector
if "train" in filename:
from sklearn.feature_selection import RFE, mutual_info_classif, SelectFpr
from sklearn.naive_bayes import MultinomialNB
# nb = ComplementNB(alpha=1.0e-10)
nb = MultinomialNB(alpha=1.0e-10)
selector = RFE(estimator=nb, n_features_to_select=300, step=0.02)
# selector.fit(new_df.iloc[:, :-1], new_df["class"].to_list())
# selector = SelectFpr(mutual_info_classif, alpha=1e-3)
selector.fit_transform(new_df.iloc[:, :-1], new_df["class"].to_list())
features_map = selector.get_support(indices=True)
# mi = mutual_info_classif(new_df.iloc[:, :-1], new_df["class"].to_list())
# features_map = np.argsort(mi)[-300:]
dump(features_map, "myData/features.map")
else:
features_map = load("myData/features.map")
features_map = np.append(features_map, [new_df.shape[1] - 1])
new_df = new_df.iloc[:, features_map]
# add_one = new_df.iloc[:, :-1] + 0.001
# new_df.update(add_one)
f_path = os.path.join("myData", "merged_" + filename)
new_df.to_csv(f_path)
map_path = os.path.join("myData", "merged_" + filename + ".map")
dump(id_map, map_path)
logging.info("[*] Saved %s; %s \n" % (f_path, map_path))
return f_path
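# A usage sketch (the CSV paths are hypothetical). Note that the RFE feature map is
# fitted and dumped only when the filename contains 'train', and reused otherwise,
# so merge() must be run on the training file first.
#
#   merged_train = merge("data/train_tweets.csv")   # writes myData/merged_train_tweets.csv
#   merged_test = merge("data/test_tweets.csv")     # reuses myData/features.map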
def word_type(data_path):
"""
:param data_path: data path
:return: None
"""
logging.info("[*] Counting word type %s" % data_path)
data = pd.read_csv(data_path, encoding="ISO-8859-1")
p_t = re.compile(r'@\w+|RT|[^\w ]')
_columns = ["prep", "pp", "topic", "adj_adv", "verb", "at_user"]
# _columns = ["prep", "pp", "topic", "adj_adv", "verb", "at_user"]
features = []
for idx, row in tqdm(data.iloc[:, 2:].iterrows(), unit=' tweets',
total=data.shape[0]):
prep, pp, topic, adj_adv, verb, at_user = 0, 0, 0, 0, 0, 0
# topic = int(row['tweet'].count('#') > 0)
# at_user = int(row['tweet'].count('@USER') > 0)
topic = int(row['tweet'].count('#') > 0)
at_user = int(row['tweet'].count('@USER') > 0)
text = re.sub(p_t, '', row['tweet']).replace('_', ' ').lower()
tokens = word_tokenize(text)
tags = pos_tag(tokens)
for idx_, token in enumerate(tags):
if token[1] in {'IN', 'TO'}:
prep += 1
if 'NN' in token[1]:
pp += 1
if 'JJ' in token[1]:
adj_adv += 1
if 'VB' in token[1]:
verb += 1
line = [prep, pp, topic, adj_adv, verb, at_user]
# line = [prep, topic, adj_adv, verb, at_user]
features.append(line)
df_features = pd.DataFrame(data=features, columns=_columns)
filename = os.path.basename(data_path)[:-4]
wt_path = "myData/my_features_{}.csv".format(filename)
df_features.to_csv(wt_path)
logging.info("[*] Saved word type %s \n" % wt_path)
def result_combination(is_train=True, evaluate_set_path=None):
"""
:param is_train: is train default True
:param evaluate_set_path: if is train, should give evaluate_set_path
:return: None
"""
if is_train:
sub_dir = "train"
else:
sub_dir = "predict"
results = []
for (_, _, filenames) in os.walk("results/" + sub_dir):
for filename in filenames:
res = pd.read_csv("results/" + sub_dir + "/" + filename)["class"]
results.append(pd.DataFrame(res))
"""
Copyright (c) 2020, <NAME> <NAME>
All rights reserved.
This is an information tool to retrieve official business financials (income statements, balance sheets, and cash flow statements) for a specified range of times. The code aims to be as vanilla as possible by minimizing the dependencies and packages used to construct functions. This code can be used immediately off the shelf and assumes no more than the following packages to be installed. As a reminder, please ensure that your directory has enough space, ideally at least 100 MB, for newly serialized reports to reside on disk until you decide to clear them.
"""
# import libraries
import re
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import datetime
from selenium import webdriver
import os
import pickle
class Business:
# Define a default constructor for the Business object
def __init__(self, foreign, symbol, report_type, start_period, end_period ):
self.foreign=foreign
self.symbol=symbol
self.report_type=report_type
self.start_period=start_period
self.end_period=end_period
#-------------Retrieving Annual/Quarter Reports----------
# Define a function to store the url(s) to a company's annual or quarter report(s)
def ghost_report_url(self):
############## Check validity of inputs #############
## Error Message if the foreign argument is not logical
if (type(self.foreign)!=bool):
raise TypeError("Invalid foreign type: foreign argument must be logical- True or False")
## Error message if the inputted ticker symbol is not a string
if(type(self.symbol)!=str):
raise TypeError("Invalid ticker symbol type: symbol argument must be a string")
## Error message if the inputted report type is neither 'annual' or 'quarter'
if(self.report_type!='annual' and self.report_type!='quarter'):
raise TypeError("Invalid report type: only 'annual' or 'quarter' report type is allowed")
## Error message if the specified start period or(and) end period is(are) not valid
if (len(str(self.start_period)) != 8) or (len(str(self.end_period)) != 8):
raise ValueError("Invalid start period or(and) end period(s): start_period and end_period arguments must be in the form yyyymmdd")
## Error message to warn that foreign quarterly reports are not available on the SEC Edgar database
if(self.foreign==True and self.report_type=='quarter'):
raise ValueError("Foreign quarterly report(s) not available: try 'annual' report instead")
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
################# Retrieving Annual Report(s) (10-K or 20-F) ################
if(self.report_type=='annual'):
# Get the url to the company's historic 10-K (including 10-K/A) or 20-F (including 20-F/A) filings(s)
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-k&dateb=&owner=exclude&count=100" if self.foreign==False else r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=20-f&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-K(include 10-K/A and others) or 20-F(include 20F/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-K or 20-F, given the company symbol and foreign logic
if len(filings_description_table[(filings_description_table["Filings"]=="10-K")|(filings_description_table["Filings"]=="20-F")])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
# Get a list of accession numbers of the historic 10-K or 20-F filing(s). Named raw_accession_numbers because the accession numbers are separated by dashes
raw_accession_numbers=filings_description_table[(filings_description_table["Filings"]=="10-K")| (filings_description_table["Filings"]=="20-F")].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-K or 20-F report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-K or 20-F report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
# Get report period(s), that is the 10-K or 20-F report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-K or 20F extracts
annual_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
annual_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
annual_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the annual report html
annual_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
annual_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
annual_report_url.append("annual report is not in HTML format")
else:
annual_report_url.append("annual report not available")
# Combine the company's report period(s), and annual report url(s) into a data frame
annual_report_df=pd.DataFrame({'report_periods':report_periods,'annual_report_url':annual_report_url,'annual_download_url':annual_download_url},index=[self.symbol]*len(report_periods))
# Return the data frame contructed above if it is not empty
if not annual_report_df.empty:
return annual_report_df
else:
return "No annual report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
################# Retrieving Quarter Report(s) (10-Q) #########################
if(self.report_type=='quarter'):
# Get the url to the company's historic 10-Q
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-q&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-Q(include 10-Q/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-Q, given the company symbol and foreign logic
if len(filings_description_table[filings_description_table["Filings"]=="10-Q"])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
# Get a list of accession numbers of the historic 10-Q. raw_accesion_numbers because accession numbers seperated by dashes
raw_accession_numbers=filings_description_table[filings_description_table["Filings"]=="10-Q"].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-Q report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-Q report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
## At this moment, documents before 2009 are not available. Documents of this type are not normally needed anyway
# Get report period(s), that is the 10-Q report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-Q extracts
quarter_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
quarter_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
quarter_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the quarterly report html
quarter_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
quarter_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
quarter_report_url.append("quarterly report is not in HTML format")
else:
quarter_report_url.append("quarterly report not available")
# Combine the company's report period(s), and quarterly report url(s) into a data frame
quarter_report_df=pd.DataFrame({'report_periods':report_periods,'quarter_report_url':quarter_report_url,'quarter_download_url':quarter_download_url},index=[self.symbol]*len(report_periods))
# Return the data frame contructed above if it is not empty
if not quarter_report_df.empty:
return quarter_report_df
else:
return "No quarter report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
#------------------------ Best-scrolled to the most relevant financial exhibit------------------------
# A function to exhibit financial statements
def financial_statements_exhibit(self):
## Errors checked in the ghost_report_url()
# Target annual financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'Financial Statements and Supplementary Data', 'Selected Financial Data'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements and Supplementary Data').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual financial statements of foreign businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'FINANCIAL STATEMENTS', 'Financial Statements', 'Selected Financial Data'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the most relevant financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
# Since the query is case insensitive, search in other cases
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target quarter financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position','Consolidated Statements of Cash Flows','Consolidated Income Statements' 'Consolidated Statements of Operations', 'FINANCIAL STATEMENTS', 'Financial Statements'
if(self.foreign==False and self.report_type=='quarter'):
# Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up best-scrolled financial exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter financial statements require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled balance sheet section
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
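# A usage sketch (assumes chromedriver.exe sits in the working directory and the
# ticker/dates are illustrative): opens one browser tab per filing, scrolled to the
# most relevant financial statement exhibit.
#
#   Business(False, 'AAPL', 'annual', 20180101, 20201231).financial_statements_exhibit()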
#------------ Best-scrolled to the most relevant risk factor exhibit------------
# A function to exhibit risk factors
def risk_factors_exhibit(self, risk_type):
## Previous errors checked in the ghost_report_url()
## Error message if the inputted risk type is neither 'enterprise' or 'market'
if(risk_type!='enterprise' and risk_type!='market'):
raise TypeError("Invalid risk type: only 'enterprise' or 'market' risk type is allowed")
########################### Enterprise Risk Exhibit ##################################
if(risk_type=='enterprise'):
# Target annual and quarter enterprise risk factors of U.S. businesses
# Prioritize in the order of 'Risk Factors','RISK FACTORS'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
# Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual enterprise risk factors of foreign businesses
# Prioritize in the order of 'Risk Factors', 'RISK FACTORS', 'KEY INFORMATION', 'Key Information'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
try:
driver.find_element_by_partial_link_text('Key Information').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
########################### Market Risk Exhibit #############################
elif(risk_type=='market'):
# Target annual and quarter market risk factors of U.S. businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk', 'QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
                # Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual market risk factors of foreign businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk','QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
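    # --- Usage sketch (illustrative) ---
    # The class name and constructor arguments below are assumptions for illustration only;
    # the actual class definition appears earlier in this file.
    #
    #   company = FinancialGhost(symbol='AAPL', foreign=False, report_type='annual',
    #                            start_period=20180101, end_period=20201231)
    #   company.risk_factors_exhibit('enterprise')  # opens tabs scrolled to the 'Risk Factors' item
    #   company.risk_factors_exhibit('market')      # opens tabs scrolled to the market risk disclosures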
#----------------------------- Curate Financial Statements -----------------------------------------
    # A function to curate income statements, balance sheets, and cash flow statements for U.S. and foreign businesses
def curate_financial_statements(self,statement_type):
## Error message if inputted statement type is not available
if(statement_type!='income' and statement_type!='balance' and statement_type!='cashflow'):
raise TypeError("Statement type not available: only 'income', 'balance', or 'cashflow' statement type is allowed")
        # Probable names for statement selection - may have to update identifiers as different companies use different statement names
income_terms=['Consolidated Income Statement', 'Consolidated Statements of Income', 'Consolidated Statements of Earnings', 'Consolidated Statements of Operations','Consolidated Statements of Profit or Loss','Profit and Loss Statement','P&L Statement','P/L Statement','Consolidated Income Statement','Consolidated Statement of Income', 'Consolidated Statement of Earnings','Consolidated Statement of Operations','Consolidated Statement of Profit or Loss','Consolidated Profit and Loss Statement','Consolidated P&L Statement','Consolidated P/L Statement','Statement of Consolidated Operations','Statements of Consolidated Operations','Statement of Combined Operation','Statements of Combined Operation']
balance_terms=['Consolidated Balance Sheets', 'Consolidated Balance Sheet','Consolidated Statements of Financial Position', 'Consolidated Statements of Financial Condition','Consolidated Statement of Financial Positions','Consolidated Statement of Financial Conditions', 'Statement of Consolidated Financial Position','Statements of Consolidated Financial Position', 'Statement of Consolidated Financial Condition', 'Statements of Consolidated Financial Condition','Combined Balance Sheet']
cashflow_terms=['Consolidated Statements of Cash Flows','Consolidated Statement of Cash Flows','Cash Flow Statement','Consolidated Cash Flow Statement', 'Statement of Consolidated Cash Flows','Statements of Consolidated Cash Flows','Statement of Combined Cash Flow','Statements of Combined Cash Flow']
        # Set root directory for file access
root_path=os.getcwd()
########### Extract Annual and Quarter Financial Statements (U.S. and foreign businesses)#################
# Retrieve periods and url(s) from the url table called by ghost_report_url()
report_table=self.ghost_report_url()
report_periods=report_table.report_periods.to_list()
if(self.report_type=='annual'):
download_url_container=report_table.annual_download_url.to_list() # container to store the download urls of annual statements
elif(self.report_type=='quarter'):
download_url_container=report_table.quarter_download_url.to_list() # container to store the download urls of quarter statements
# Designate a directory to store downloaded statements (begin statement piling)
statement_pile_path=os.path.join(root_path,'statement_pile')
company_pile_path=os.path.join(statement_pile_path,self.symbol)
try:
os.mkdir(statement_pile_path) # Create the statement_pile_path path
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
try:
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
os.chdir(company_pile_path)
        # Download accessible statements into the statement_pile path
# Construct a data frame to store the specified statement type
period_container=[] # container to store statement periods
statement_container=[] # container to store statement table
for url_index in range(len(download_url_container)):
statement_period=report_periods[url_index].strftime("%Y-%m-%d")
if(download_url_container[url_index] is not None and download_url_container[url_index][download_url_container[url_index].rfind('.')+1:len(download_url_container[url_index])]!='xls'):
statement_file=requests.get(download_url_container[url_index])
file_name=self.symbol+statement_period+self.report_type+'.xlsx'
with open(file_name, 'wb+') as fs:
fs.write(statement_file.content) # populating statement contents
dfs=pd.ExcelFile(fs)
sheet_headers=list(map(lambda x: x.lower().replace(' ','').replace('_','').replace('-','').replace(',','').replace("'","").replace('&','').replace('/',''), [dfs.parse(sn).columns[0] for sn in dfs.sheet_names]))
############################ Income Statements ###################################
if (statement_type=='income'):
income_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''),income_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in income_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify income statement and store in dataframe form
income_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store income statement into the statement container
statement_container.append(income_statement)
# Store income statement period into the period container
period_container.append(statement_period)
# Serialize the income statement dataframe into '.pickle'- to be accessed faster next time
income_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store income statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store income statement period into the period container
period_container.append(statement_period)
# Message to warn that income statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' income statement not identified or not available: update income statement identifiers or pass')
############################ Balance Sheets ###################################
if (statement_type=='balance'):
balance_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), balance_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in balance_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify balance sheet and store in dataframe form
balance_sheet=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
                        # Store balance sheet into the statement container
statement_container.append(balance_sheet)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Serialize the balance sheet dataframe into '.pickle'- to be accessed faster next time
balance_sheet.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store balance sheet as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Message to warn that balance sheet may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' balance sheet not identified or not available: update balance sheet identifiers or pass')
############################ Cash Flow Statements ###################################
if (statement_type=='cashflow'):
cashflow_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), cashflow_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in cashflow_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify cash flow statement and store in dataframe form
cashflow_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store cash flow statement into the statement container
statement_container.append(cashflow_statement)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Serialize the cash flow statement dataframe into '.pickle'- to be accessed faster next time
cashflow_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store cash flow statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Message to warn that cash flow statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' cashflow statement not identified or not available: update cash flow statement identifiers or pass')
fs.close() # close the downloaded '.xlsx' file
os.remove(file_name) # remove the downloaded '.xlsx' file after extracting financial statements
else:
print(self.symbol+' '+statement_period+' '+self.report_type+' '+statement_type+' statement not available')
        # Combine the company's income statement(s) or balance sheet(s) or cash flow statement(s), and statement periods into a dataframe
statement_df=pd.DataFrame({'statement_periods':period_container,statement_type+'_statement':statement_container},index=[self.symbol]*len(period_container))
# Return back to root_path (end statement piling)
os.chdir(root_path)
        # Return the data frame constructed above if it is not empty
if not statement_df.empty:
return statement_df
else:
return 'No '+self.report_type+' '+statement_type+' statement for '+self.symbol+' between '+self.start_period.strftime("%Y-%m-%d")+' and '+self.end_period.strftime("%Y-%m-%d")
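    # --- Usage sketch (illustrative; the class name is an assumption, as noted above) ---
    # curate_financial_statements() downloads the '.xlsx' reports listed by ghost_report_url(),
    # pickles the sheet matching the requested statement type for each period, and returns a
    # dataframe of (period, statement) pairs.
    #
    #   income_df = company.curate_financial_statements('income')
    #   income_df.columns   # -> ['statement_periods', 'income_statement']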
#------------------------Extract Most Recent Income Statements--------------------------------
def ghost_income(self):
bin_path=r'.\\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualIncome" in s for s in bin_files]):
annual_income_file=[s for s in bin_files if "AnnualIncome" in s]
annual_income_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_income_file))
annual_income_file=[annual_income_file[i] for i in range(len(annual_income_file)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_periods=[annual_income_periods[i] for i in range(len(annual_income_periods)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_file.reverse()
annual_income_periods.reverse()
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[6])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[6]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[5])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[5]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[4])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[4]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[2])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[2]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[1])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[1]).group()
except:
try:
binded_income=pd.read_pickle(bin_path+'\\'+annual_income_file[0])
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()
except:
binded_income=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(annual_income_periods)>0):
if(end_period-annual_income_periods[0]).days>365:
print('Recommend updating to the latest annual income statements: update via .update_financial_statements("income"), then call this function again')
else:
business_income=self.curate_financial_statements('income')
try:
binded_income=pd.concat([business_income.income_statement[0],business_income.income_statement[3], business_income.income_statement[6]], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+business_income.statement_periods[0]+', '+business_income.statement_periods[3]+', '+business_income.statement_periods[6]
except:
try:
binded_income=pd.concat([business_income.income_statement[0],business_income.income_statement[3], business_income.income_statement[5]], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+business_income.statement_periods[0]+', '+business_income.statement_periods[3]+', '+business_income.statement_periods[5]
except:
try:
binded_income=pd.concat([business_income.income_statement[0],business_income.income_statement[3], business_income.income_statement[4]], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+business_income.statement_periods[0]+', '+business_income.statement_periods[3]+', '+business_income.statement_periods[4]
except:
try:
binded_income=pd.concat([business_income.income_statement[0],business_income.income_statement[3]], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+business_income.statement_periods[0]+', '+business_income.statement_periods[3]
except:
try:
binded_income=pd.concat([business_income.income_statement[0],business_income.income_statement[2]], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+business_income.statement_periods[0]+', '+business_income.statement_periods[2]
except:
try:
binded_income=pd.concat([business_income.income_statement[0],business_income.income_statement[1]], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+business_income.statement_periods[0]+', '+business_income.statement_periods[1]
except:
try:
binded_income=business_income.income_statement[0]
binded_message='Ghosted '+self.report_type+' income statments for '+business_income.statement_periods[0]
except:
binded_income=None
binded_message='No '+self.report_type+' income statements for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
elif(self.report_type=='quarter'):
if any(["QuarterIncome" in s for s in bin_files]):
quarter_income_file=[s for s in bin_files if "QuarterIncome" in s]
quarter_income_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),quarter_income_file))
quarter_income_file=[quarter_income_file[i] for i in range(len(quarter_income_file)) if quarter_income_periods[i]>start_period and quarter_income_periods[i]<=end_period]
quarter_income_periods=[quarter_income_periods[i] for i in range(len(quarter_income_periods)) if quarter_income_periods[i]>start_period and quarter_income_periods[i]<=end_period]
quarter_income_file.reverse()
quarter_income_periods.reverse()
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+f) for f in quarter_income_file], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+', '.join([re.search('\d{4}-\d{2}-\d{2}',f).group() for f in quarter_income_file])
except:
binded_income=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(quarter_income_periods)>0):
if(end_period-quarter_income_periods[0]).days>180:
print('Recommend updating to the latest quarter income statements: update via .update_financial_statements("income") function, then call this function again')
else:
business_income=self.curate_financial_statements('income')
try:
binded_income=pd.concat(business_income.income_statement.to_list(), axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+', '.join([business_income.statement_periods[i] for i in range(len(business_income.statement_periods))])
except:
binded_income=None
binded_message='No '+self.report_type+' income statements for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
print(binded_message)
return binded_income
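    # --- Usage sketch (illustrative; 'company' as constructed in the earlier sketch) ---
    # If no pickled income statements exist yet under .\statement_pile\<symbol>, ghost_income()
    # calls curate_financial_statements('income'); otherwise it binds the cached statements that
    # fall inside the requested reporting window column-wise.
    #
    #   income = company.ghost_income()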
#------------------------Extract Most Recent Balance Sheets--------------------------------
def ghost_balance(self):
bin_path=r'.\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualBalance" in s for s in bin_files]):
annual_balance_file=[s for s in bin_files if "AnnualBalance" in s]
annual_balance_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_balance_file))
annual_balance_file=[annual_balance_file[i] for i in range(len(annual_balance_file)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_periods=[annual_balance_periods[i] for i in range(len(annual_balance_periods)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_file.reverse()
annual_balance_periods.reverse()
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6]), pd.read_pickle(bin_path+'\\'+annual_balance_file[8])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[8]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6]), pd.read_pickle(bin_path+'\\'+annual_balance_file[7])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[7]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[5])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[5]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[3])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[3]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[1])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[1]).group()
except:
try:
binded_balance=pd.read_pickle(bin_path+'\\'+annual_balance_file[0])
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()
except:
binded_balance=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(annual_balance_periods)>0):
if(end_period-annual_balance_periods[0]).days>365:
print('Recommend updating to the latest annual balance sheets: update via .update_financial_statements("balance") function, then call this function again')
else:
business_balance=self.curate_financial_statements('balance')
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[6],business_balance.balance_statement[8]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[6]+', '+business_balance.statement_periods[8]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[6],business_balance.balance_statement[7]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[6]+', '+business_balance.statement_periods[7]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[6]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[6]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[5]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[5]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[3]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[3]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[1]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[1]
except:
try:
binded_balance=business_balance.balance_statement[0]
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]
except:
binded_balance=None
binded_message='No '+self.report_type+' balance sheets for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
elif(self.report_type=='quarter'):
if any(["QuarterBalance" in s for s in bin_files]):
quarter_balance_file=[s for s in bin_files if "QuarterBalance" in s]
quarter_balance_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),quarter_balance_file))
quarter_balance_file=[quarter_balance_file[i] for i in range(len(quarter_balance_file)) if quarter_balance_periods[i]>start_period and quarter_balance_periods[i]<=end_period]
quarter_balance_periods=[quarter_balance_periods[i] for i in range(len(quarter_balance_periods)) if quarter_balance_periods[i]>start_period and quarter_balance_periods[i]<=end_period]
quarter_balance_file.reverse()
quarter_balance_periods.reverse()
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+f) for f in quarter_balance_file], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+', '.join([re.search('\d{4}-\d{2}-\d{2}',f).group() for f in quarter_balance_file])
except:
binded_balance=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(quarter_balance_periods)>0):
if(end_period-quarter_balance_periods[0]).days>180:
print('Recommend updating to the latest quarter balance sheets: update via .update_financial_statements("balance") function, then call this function again')
else:
business_balance=self.curate_financial_statements('balance')
try:
binded_balance=pd.concat(business_balance.balance_statement.to_list(), axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+', '.join([business_balance.statement_periods[i] for i in range(len(business_balance.statement_periods))])
except:
binded_balance=None
binded_message='No '+self.report_type+' balance sheets for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
print(binded_message)
return binded_balance
#------------------------Extract Most Recent Statement of Cash Flows--------------------------------
def ghost_cashflow(self):
bin_path=r'.\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualCashflow" in s for s in bin_files]):
annual_cashflow_file=[s for s in bin_files if "AnnualCashflow" in s]
annual_cashflow_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_cashflow_file))
annual_cashflow_file=[annual_cashflow_file[i] for i in range(len(annual_cashflow_file)) if annual_cashflow_periods[i]>start_period and annual_cashflow_periods[i]<=end_period]
annual_cashflow_periods=[annual_cashflow_periods[i] for i in range(len(annual_cashflow_periods)) if annual_cashflow_periods[i]>start_period and annual_cashflow_periods[i]<=end_period]
annual_cashflow_file.reverse()
annual_cashflow_periods.reverse()
try:
binded_cashflow=pd.concat([pd.read_pickle(bin_path+'\\'+annual_cashflow_file[0]),pd.read_pickle(bin_path+'\\'+annual_cashflow_file[3]), pd.read_pickle(bin_path+'\\'+annual_cashflow_file[6])], axis = 1)
binded_message='Ghosted '+self.report_type+' cashflow statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[6]).group()
except:
try:
binded_cashflow=pd.concat([pd.read_pickle(bin_path+'\\'+annual_cashflow_file[0]),pd.read_pickle(bin_path+'\\'+annual_cashflow_file[3]), pd.read_pickle(bin_path+'\\'+annual_cashflow_file[5])], axis = 1)
binded_message='Ghosted '+self.report_type+' cashflow statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[5]).group()
except:
try:
binded_cashflow=pd.concat([pd.read_pickle(bin_path+'\\'+annual_cashflow_file[0]),pd.read_pickle(bin_path+'\\'+annual_cashflow_file[3]), pd.read_pickle(bin_path+'\\'+annual_cashflow_file[4])], axis = 1)
binded_message='Ghosted '+self.report_type+' cashflow statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[4]).group()
except:
try:
binded_cashflow=pd.concat([pd.read_pickle(bin_path+'\\'+annual_cashflow_file[0]),pd.read_pickle(bin_path+'\\'+annual_cashflow_file[3])], axis = 1)
binded_message='Ghosted '+self.report_type+' cashflow statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[3]).group()
except:
try:
binded_cashflow=pd.concat([pd.read_pickle(bin_path+'\\'+annual_cashflow_file[0]),pd.read_pickle(bin_path+'\\'+annual_cashflow_file[2])], axis = 1)
binded_message='Ghosted '+self.report_type+' cashflow statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[2]).group()
except:
try:
binded_cashflow=pd.concat([pd.read_pickle(bin_path+'\\'+annual_cashflow_file[0]),pd.read_pickle(bin_path+'\\'+annual_cashflow_file[1])], axis = 1)
binded_message='Ghosted '+self.report_type+' cashflow statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[1]).group()
except:
try:
binded_cashflow=pd.read_pickle(bin_path+'\\'+annual_cashflow_file[0])
binded_message='Ghosted '+self.report_type+' cashflow statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_cashflow_file[0]).group()
except:
binded_cashflow=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(annual_cashflow_periods)>0):
if(end_period-annual_cashflow_periods[0]).days>365:
print('Recommend updating to the latest annual cashflow statements: update via .update_financial_statements("cashflow") function, then call this function again')
else:
business_cashflow=self.curate_financial_statements('cashflow')
try:
binded_cashflow=pd.concat([business_cashflow.cashflow_statement[0],business_cashflow.cashflow_statement[3], business_cashflow.cashflow_statement[6]], axis = 1)
binded_message='Ghosted '+self.report_type+' cashflow statements for '+business_cashflow.statement_periods[0]+', '+business_cashflow.statement_periods[3]+', '+business_cashflow.statement_periods[6]
except:
try:
                        binded_cashflow=pd.concat([business_cashflow.cashflow_statement[0],business_cashflow.cashflow_statement[3], business_cashflow.cashflow_statement[5]], axis = 1)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: Sina Finance - stock options
https://stock.finance.sina.com.cn/option/quotes.html
Options - CFFEX - CSI 300 index
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
Options - SSE (Shanghai Stock Exchange) - 50 ETF
Options - SSE (Shanghai Stock Exchange) - 300 ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Options - CFFEX - CSI 300 index
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
    Sina Finance - CFFEX - CSI 300 index - all contracts; the first contract returned is the main contract
    Currently Sina Finance only provides CFFEX data for the CSI 300 index
    :return: CFFEX - CSI 300 index - all contracts
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
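# --- Usage sketch (illustrative; the values shown are indicative of the output shape, not real quotes) ---
#   contracts = option_cffex_hs300_list_sina()
#   # e.g. {'沪深300指数': ['io2104', 'io2105', ...]}; the first contract is the main contract
#   main_contract = list(contracts.values())[0][0]
#   spot_df = option_cffex_hs300_spot_sina(symbol=main_contract)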
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> pd.DataFrame:
"""
    CFFEX - CSI 300 index - specified contract - real-time quotes
    https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
    :param symbol: contract code; use the option_cffex_hs300_list_sina function to list available codes
    :type symbol: str
    :return: CFFEX - CSI 300 index - specified contract - real-time call/put quotes
:rtype: pd.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.getOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_call_df = pd.DataFrame(
data_json["result"]["data"]["up"],
columns=[
"看涨合约-买量",
"看涨合约-买价",
"看涨合约-最新价",
"看涨合约-卖价",
"看涨合约-卖量",
"看涨合约-持仓量",
"看涨合约-涨跌",
"行权价",
"看涨合约-标识",
],
)
option_put_df = pd.DataFrame(
data_json["result"]["data"]["down"],
columns=[
"看跌合约-买量",
"看跌合约-买价",
"看跌合约-最新价",
"看跌合约-卖价",
"看跌合约-卖量",
"看跌合约-持仓量",
"看跌合约-涨跌",
"看跌合约-标识",
],
)
data_df = pd.concat([option_call_df, option_put_df], axis=1)
data_df['看涨合约-买量'] = pd.to_numeric(data_df['看涨合约-买量'])
data_df['看涨合约-买价'] = pd.to_numeric(data_df['看涨合约-买价'])
data_df['看涨合约-最新价'] = pd.to_numeric(data_df['看涨合约-最新价'])
data_df['看涨合约-卖价'] = pd.to_numeric(data_df['看涨合约-卖价'])
data_df['看涨合约-卖量'] = pd.to_numeric(data_df['看涨合约-卖量'])
data_df['看涨合约-持仓量'] = pd.to_numeric(data_df['看涨合约-持仓量'])
data_df['看涨合约-涨跌'] = pd.to_numeric(data_df['看涨合约-涨跌'])
data_df['行权价'] = pd.to_numeric(data_df['行权价'])
    data_df['看跌合约-买量'] = pd.to_numeric(data_df['看跌合约-买量'])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA, KernelPCA
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
def noise(r): # generate 500 noise samples scaled by the radius r
return (np.random.rand(500) - 0.5) * np.random.rand() * r /5
def base():# generate a basis randomly for 3D Transformation
n = np.random.rand(5)
n = n - 0.5
x = [n[0], n[1], n[2]]
y = [n[3], n[4]]
norm = np.linalg.norm(x)
for i in range(0, len(x)):
x[i] = x[i] / norm
y.append((-x[0]*y[0] - x[1]*y[1]) / x[2])
norm = np.linalg.norm(y)
for i in range(0, len(y)):
y[i] = y[i] / norm
z = np.cross(x, y)
return([x, y, z])
def circle(O,r): # generate a circle
angle = np.linspace(0, 2*3.1416, 500)
x = O + np.cos(angle) * r + noise(r)
y = np.sin(angle) * r + noise(r)
z = np.zeros(len(x)) + noise(r)
return x, y, z
def dataset(r): # generate dataset of two interlocked rings, 0.5<=r<=1, so the rings are coupled
O1=1
O2=0
d = abs(O1 - O2)
angle = np.linspace(0, 2*3.1416, 250)
x1, y1, z1 = circle(O1, r)
x2, z2, y2 = circle(O2, r) # get two circles
A = base() # get a random 3D basis for transformation
D1 = np.dot(A, np.array([x1, y1, z1]))
D2 = np.dot(A, np.array([x2, y2, z2])) # apply transformation
return D1, D2
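# Quick sanity check of the generators above (illustrative; the points themselves are random):
#   D1, D2 = dataset(0.8)      # two interlocked noisy rings embedded via a random 3D basis
#   D1.shape, D2.shape         # -> (3, 500) each: rows are the x, y, z coordinates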
def draw_sca(ax, D1, D2, title='', lw =0.1): # drawing the scatter figure
ax.scatter(D1[0], D1[1], D1[2], color='red', linewidths = lw)
ax.scatter(D2[0], D2[1], D2[2], color='blue', linewidths = lw)
ax.set_xlabel('x', color = 'red')
ax.set_ylabel('y', color = 'blue')
ax.set_zlabel('z', color = 'green')
ax.set_title(title, x = 0.5, y = -0.1)
def SVM_train(D1, D2, model): # SVM Train
D1 = pd.DataFrame(np.c_[D1.T, np.zeros((len(D1[0]),1))])
D2 = pd.DataFrame(np.c_[D2.T, np.ones((len(D2[0]),1))]) # Add label
D = pd.concat([D1, D2])
X = pd.DataFrame(preprocessing.minmax_scale(D.iloc[:, :-1], feature_range=(-0.5, 0.5))) # normalization
Y = D.iloc[:, -1]
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.9)
model.fit(x_train, y_train)
predict = model.predict(X)
R = []
B = []
for i in range(predict.shape[0]): # label the result
R.append([X.iloc[i,0], X.iloc[i,1], X.iloc[i,2]]) if predict[i] == 1 else B.append([X.iloc[i,0], X.iloc[i,1], X.iloc[i,2]])
S = model.support_vectors_.T
score = model.score(X, Y)
return R, B, S, score
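# Example of training a single kernel SVM on one generated ring pair (illustrative):
#   D1, D2 = dataset(0.8)
#   R, B, S, score = SVM_train(D1, D2, svm.SVC(kernel='rbf', gamma=25))
#   # R/B hold the points predicted as class 1/0, S holds the support vectors (transposed),
#   # and score is the accuracy over the full normalised dataset.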
def SVM_Analysis(): #Performance Comparsion Analysis on different SVM model
    r = np.linspace(1, 0.5, 25) # generate different r values, giving different p
y = [[],[]]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
    ax.set_title('SVM Performance Analysis score vs. p')
for i in range(len(r)):
        s = [0, 0]
print(i)
for j in range(10): # for each p, we test the two svm models for 10 times
D1, D2 = dataset(r[i])
R, B, S, score = SVM_train(D1, D2, svm.SVC(kernel='rbf'))
s[0] += score
R, B, S, score = SVM_train(D1, D2, svm.SVC(kernel='rbf', gamma=25))
s[1] += score
y[0].append(s[0]/10)
y[1].append(s[1]/10)
    r = 2 * r - 1 # calculate the p
ax.set_ylim([0.9, 1])
ax.set_xlim([1,0])
ax.plot(r, y[0],label='gamma=default')
ax.plot(r,y[1],label='gamma=25')
ax.legend( fontsize=15)
plt.show()
def SVM(r): # generate the dataset, train the SVM model, and draw the classification result and support vector
SVM_Analysis() # draw the score vs different p
fig = plt.figure()
r=0.8
D1, D2 = dataset(r)
plt.title('p='+str(round(2*r-1,2))) # set r and p and generate data for specific SVM training
R, B, S, score = SVM_train(D1, D2, svm.SVC(kernel='rbf', gamma=250)) # different gamma for KSVM
ax = fig.add_subplot(1, 2, 1, projection='3d')
draw_sca(ax,np.array(R).T, np.array(B).T,'SVM score=' + str(score))
ax.scatter(S[0], S[1], S[2], color='yellow', linewidths = 1)
R, B, S, score = SVM_train(D1, D2, svm.SVC(kernel='rbf', gamma=25))
ax = fig.add_subplot(1, 2, 2, projection='3d')
draw_sca(ax,np.array(R).T, np.array(B).T,'SVM score=' + str(score))
ax.scatter(S[0], S[1], S[2], color='yellow', linewidths = 1)
plt.show() # draw two model classification result
def PCA_train(D1, D2, model): # train a PCA model
D1 = pd.DataFrame(D1.T)
    D2 = pd.DataFrame(D2.T)
# brightwind is a library that provides wind analysts with easy to use tools for working with meteorological data.
# Copyright (C) 2021 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from brightwind.load.load import _is_file
import numpy as np
import pandas as pd
import requests
import json
import copy
__all__ = ['MeasurementStation']
def _replace_none_date(list_or_dict):
if isinstance(list_or_dict, list):
renamed = []
for item in list_or_dict:
renamed.append(_replace_none_date(item))
return renamed
elif isinstance(list_or_dict, dict):
for date_str in ['date_from', 'date_to']:
if list_or_dict.get(date_str) is None:
list_or_dict[date_str] = DATE_INSTEAD_OF_NONE
return list_or_dict
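# For example (illustrative):
#   _replace_none_date({'date_from': '2020-01-01', 'date_to': None})
#   # -> {'date_from': '2020-01-01', 'date_to': '2100-12-31'}   (i.e. DATE_INSTEAD_OF_NONE)
# Lists of such dictionaries are handled recursively, so a list of date ranges can be passed directly.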
def _get_title(property_name, schema, property_section=None):
"""
Get the title for the property name from the WRA Data Model Schema. Optionally, you can send the section of the
schema where the property should be found. This avoids finding the wrong property name when the name
is not unique.
If the property name is not found it will return itself.
:param property_name: The property name to find.
:type property_name: str
:param schema: The WRA Data Model Schema.
:type schema: dict
:param property_section: The section in the schema where the property can be found. This avoids the case where the
property_name is not unique in the schema.
:type property_section: str or None
:return: The title as stated in the schema.
:rtype: str
"""
# search through definitions first
if schema.get('definitions') is not None:
if property_name in schema.get('definitions').keys():
return schema.get('definitions').get(property_name).get('title')
# search through properties
if schema.get('properties') is not None:
# is property_name in the main properties
if property_name in schema.get('properties').keys() and property_section is None:
return schema.get('properties').get(property_name).get('title')
# is property_section part of the main properties
if property_section in schema.get('properties').keys():
property_type = schema.get('properties').get(property_section).get('type')
if property_type is not None and 'array' in property_type:
# move down into an array
result = _get_title(property_name, schema.get('properties').get(property_section)['items'])
if result != property_name:
return result
elif property_type is not None and 'object' in property_type:
# move down into an object
result = _get_title(property_name, schema.get('properties').get(property_section))
if result != property_name:
return result
# don't recognise either property_name or property_section.
# loop through each property to find an array or object to move down to
for k, v in schema.get('properties').items():
if v.get('type') is not None and 'array' in v['type']:
# move down into an array
result = _get_title(property_name, v['items'], property_section)
if result != property_name:
return result
elif v.get('type') is not None and 'object' in v['type']:
# move down into an object
result = _get_title(property_name, v, property_section)
if result != property_name:
return result
# can't find the property_name in the schema, return itself
return property_name
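# For example, with a deliberately minimal (hypothetical) schema fragment:
#   example_schema = {'properties': {'name': {'title': 'Name', 'type': 'string'}}}
#   _get_title('name', example_schema)      # -> 'Name'
#   _get_title('unknown', example_schema)   # -> 'unknown' (falls back to the property name itself)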
def _rename_to_title(list_or_dict, schema):
"""
    Rename the names in a list to their equivalent titles in the schema, or likewise the keys in a dictionary. If there are
prefixes from raising a child property up to a parent level, this will find the normal schema title and add
the prefixed title to it.
:param list_or_dict: List of names or dictionary with keys to rename.
:type list_or_dict: list or dict
:param schema: The WRA Data Model Schema.
:type schema: dict
:return: A renamed list or keys in dictionary.
:rtype: list or dict
"""
prefixed_names = {}
# find all possible prefixed names and build a dict to contain it and the separator and title.
for key in PREFIX_DICT.keys():
for col in PREFIX_DICT[key]['keys_to_prefix']:
prefixed_name = key + PREFIX_DICT[key]['prefix_separator'] + col
prefixed_names[prefixed_name] = {'prefix_separator': PREFIX_DICT[key]['prefix_separator'],
'title_prefix': PREFIX_DICT[key]['title_prefix']}
if isinstance(list_or_dict, dict):
renamed_dict = {}
for k, v in list_or_dict.items():
if k in list(prefixed_names.keys()):
# break out the property name and the name, get the title and then add title_prefix to it.
property_section = k[0:k.find(prefixed_names[k]['prefix_separator'])]
property_name = k[k.find(prefixed_names[k]['prefix_separator']) + 1:]
if k in ['sensor_config.slope', 'sensor_config.offset', 'sensor_config.sensitivity',
'calibration.slope', 'calibration.offset', 'calibration.sensitivity']:
# Special cases don't add a title prefix as there is already one in the schema title
renamed_dict[_get_title(property_name, schema, property_section)] = v
else:
renamed_dict[prefixed_names[k]['title_prefix'] + _get_title(property_name, schema,
property_section)] = v
else:
# if not in the list of prefixed_names then just find the title as normal.
renamed_dict[_get_title(k, schema)] = v
return renamed_dict
elif isinstance(list_or_dict, list):
renamed_list = []
for name in list_or_dict:
if name in list(prefixed_names.keys()):
# break out the property name and the name, get the title and then add title_prefix to it.
property_section = name[0:name.find(prefixed_names[name]['prefix_separator'])]
property_name = name[name.find(prefixed_names[name]['prefix_separator']) + 1:]
if name in ['sensor_config.slope', 'sensor_config.offset', 'sensor_config.sensitivity',
'calibration.slope', 'calibration.offset', 'calibration.sensitivity']:
# Special cases don't add a title prefix as there is already one in the schema title
renamed_list.append(_get_title(property_name, schema, property_section))
else:
renamed_list.append(prefixed_names[name]['title_prefix'] + _get_title(property_name, schema,
property_section))
else:
# if not in the list of prefixed_names then just find the title as normal.
renamed_list.append(_get_title(name, schema))
return renamed_list
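# For example, reusing a minimal (hypothetical) schema fragment with a nested 'sensor' array:
#   example_schema = {'properties': {
#       'name': {'title': 'Name', 'type': 'string'},
#       'sensor': {'type': 'array',
#                  'items': {'properties': {'serial_number': {'title': 'Serial Number',
#                                                             'type': 'string'}}}}}}
#   _rename_to_title(['name', 'sensor.serial_number'], example_schema)
#   # -> ['Name', 'Sensor Serial Number']   (prefixed via PREFIX_DICT['sensor'])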
def _extract_keys_to_unique_list(lists_of_dictionaries):
"""
Extract the keys for a list of dictionaries and merge them into a unique list.
:param lists_of_dictionaries: List of dictionaries to pull unique keys from.
:type lists_of_dictionaries: list(dict)
:return: Merged list of keys into a unique list.
:rtype: list
"""
merged_list = list(lists_of_dictionaries[0].keys())
for idx, d in enumerate(lists_of_dictionaries):
if idx != 0:
merged_list = merged_list + list(set(list(d.keys())) - set(merged_list))
return merged_list
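# For example (illustrative):
#   _extract_keys_to_unique_list([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}])
#   # -> ['a', 'b', 'c']  (the order of keys added after the first dict may vary, as a set is used)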
def _add_prefix(dictionary, property_section):
"""
Add a prefix to certain keys in the dictionary.
:param dictionary: The dictionary containing the keys to rename.
:type dictionary: dict
:return: The dictionary with the keys prefixed.
:rtype: dict
"""
prefixed_dict = {}
for k, v in dictionary.items():
if k in PREFIX_DICT[property_section]['keys_to_prefix']:
prefixed_dict[property_section + PREFIX_DICT[property_section]['prefix_separator'] + k] = v
else:
prefixed_dict[k] = v
return prefixed_dict
def _merge_two_dicts(x, y):
"""
Given two dictionaries, merge them into a new dict as a shallow copy.
"""
z = x.copy()
z.update(y)
return z
def _filter_parent_level(dictionary):
"""
Pull only the parent level keys and values i.e. do not return any child lists or dictionaries or nulls/Nones.
:param dictionary:
:return:
"""
parent = {}
for key, value in dictionary.items():
if (type(value) != list) and (type(value) != dict) and (value is not None):
parent.update({key: value})
return parent
def _flatten_dict(dictionary, property_to_bring_up):
"""
Bring a child level in a dictionary up to the parent level.
This is usually when there is an array of child levels and so the parent level is repeated.
:param dictionary: Dictionary with keys to prefix.
:type dictionary: dict
:param property_to_bring_up: The child property name to raise up to the parent level.
:type property_to_bring_up: str
:return: A list of merged dictionaries
:rtype: list(dict)
"""
result = []
parent = _filter_parent_level(dictionary)
for key, value in dictionary.items():
if (type(value) == list) and (key == property_to_bring_up):
for item in value:
child = _filter_parent_level(item)
child = _add_prefix(child, property_section=property_to_bring_up)
result.append(_merge_two_dicts(parent, child))
if (type(value) == dict) and (key == property_to_bring_up):
child = _filter_parent_level(value)
child = _add_prefix(child, property_section=property_to_bring_up)
# return a dictionary and not a list
result = _merge_two_dicts(parent, child)
# result.append(_merge_two_dicts(parent, child))
if not result:
result.append(parent)
return result
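# For example (illustrative), raising the 'sensor' child of a measurement point:
#   _flatten_dict({'name': 'Spd_80m', 'sensor': [{'oem': 'Vendor A', 'serial_number': 'SN001'}]},
#                 'sensor')
#   # -> [{'name': 'Spd_80m', 'oem': 'Vendor A', 'sensor.serial_number': 'SN001'}]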
def _raise_child(dictionary, child_to_raise):
"""
:param dictionary:
:param child_to_raise:
:return:
"""
# FUTURE DEV: ACCOUNT FOR 'DATE_OF_CALIBRATION' WHEN RAISING UP MULTIPLE CALIBRATIONS
if dictionary is None:
return None
new_dict = dictionary.copy()
for key, value in dictionary.items():
if (key == child_to_raise) and (value is not None):
# Found the key to raise. Flattening dictionary.
return _flatten_dict(dictionary, child_to_raise)
# didn't find the child to raise. search down through each nested dict or list
for key, value in dictionary.items():
if (type(value) == dict) and (value is not None):
# 'key' is a dict, looping through its own keys.
flattened_dicts = _raise_child(value, child_to_raise)
if flattened_dicts:
new_dict[key] = flattened_dicts
return new_dict
elif (type(value) == list) and (value is not None):
# 'key' is a list, looping through its items.
temp_list = []
for idx, item in enumerate(value):
flattened_dicts = _raise_child(item, child_to_raise)
if flattened_dicts:
if isinstance(flattened_dicts, list):
for flat_dict in flattened_dicts:
temp_list.append(flat_dict)
else:
# it is a dictionary so just append it
temp_list.append(flattened_dicts)
if temp_list:
# Temp_list is not empty. Replacing 'key' with this.
new_dict[key] = temp_list
return new_dict
return None
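# Illustrative sketch (hypothetical data): the nested structure is searched for the named child
# key and a copy is returned with that child flattened up to its parent's level.
def _example_raise_child():
    meas_point = {'name': 'Spd1', 'sensor_config': [{'slope': 0.046, 'date_from': '2020-01-01'}]}
    # Expected result: [{'name': 'Spd1', 'sensor_config.slope': 0.046, 'date_from': '2020-01-01'}]
    return _raise_child(meas_point, child_to_raise='sensor_config')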
PREFIX_DICT = {
'mast_properties': {
'prefix_separator': '.',
'title_prefix': 'Mast ',
'keys_to_prefix': ['notes', 'update_at']
},
'vertical_profiler_properties': {
'prefix_separator': '.',
'title_prefix': 'Vert. Prof. Prop. ',
'keys_to_prefix': ['notes', 'update_at']
},
'lidar_config': {
'prefix_separator': '.',
'title_prefix': 'Lidar Specific Configs ',
'keys_to_prefix': ['date_from', 'date_to', 'notes', 'update_at']
},
'sensor_config': {
'prefix_separator': '.',
'title_prefix': 'Logger ',
'keys_to_prefix': ['height_m', 'height_reference_id', 'serial_number',
'slope', 'offset', 'sensitivity',
'notes', 'update_at']
},
'column_name': {
'prefix_separator': '.',
'title_prefix': 'Column Name ',
'keys_to_prefix': ['notes', 'update_at']
},
'sensor': {
'prefix_separator': '.',
'title_prefix': 'Sensor ',
'keys_to_prefix': ['serial_number', 'notes', 'update_at']
},
'calibration': {
'prefix_separator': '.',
'title_prefix': 'Calibration ',
'keys_to_prefix': ['slope', 'offset', 'sensitivity', 'report_file_name', 'report_link',
'uncertainty_k_factor', 'date_from', 'date_to', 'notes', 'update_at']
},
'calibration_uncertainty': {
'prefix_separator': '.',
'title_prefix': 'Calibration Uncertainty ',
'keys_to_prefix': []
},
'mounting_arrangement': {
'prefix_separator': '.',
'title_prefix': 'Mounting Arrangement ',
'keys_to_prefix': ['notes', 'update_at']
},
'interference_structures': {
'prefix_separator': '.',
'title_prefix': 'Interference Structure ',
'keys_to_prefix': ['structure_type_id', 'orientation_from_mast_centre_deg', 'orientation_reference_id',
'distance_from_mast_centre_mm',
'date_from', 'date_to', 'notes', 'update_at']
}
}
DATE_INSTEAD_OF_NONE = '2100-12-31'
SENSOR_TYPE_ORDER = ['anemometer', '2d_ultrasonic', '3d_ultrasonic', 'propeller_anemometer', 'gill_propeller',
'wind_vane', 'pyranometer', 'pyrheliometer', 'thermometer', 'hygrometer', 'barometer',
'rain_gauge', 'voltmeter', 'ammeter',
'ice_detection_sensor', 'fog_sensor', 'illuminance_sensor', 'gps', 'compass', 'other']
MEAS_TYPE_ORDER = ['wind_speed', 'wind_direction', 'vertical_wind_speed',
'global_horizontal_irradiance', 'direct_normal_irradiance', 'diffuse_horizontal_irradiance',
'global_tilted_irradiance', 'global_normal_irradiance', 'soiling_loss_index', 'illuminance',
'wind_speed_turbulence',
'air_temperature', 'temperature', 'relative_humidity', 'air_pressure', 'precipitation',
'ice_detection', 'voltage', 'current',
'fog', 'carrier_to_noise_ratio', 'doppler_spectral_broadening',
'gps_coordinates', 'orientation', 'compass_direction', 'true_north_offset',
'elevation', 'altitude', 'azimuth', 'status', 'counter', 'availability', 'quality',
'tilt_x', 'tilt_y', 'tilt_z', 'timestamp', 'other']
class MeasurementStation:
"""
Create a Measurement Station object by loading in an IEA Wind Resource Assessment Data Model.
The IEA Wind: Task 43 Work Package 4 WRA Data Model was first released in January 2021. Versions of the
Data Model Schema can be found at https://github.com/IEA-Task-43/digital_wra_data_standard
The Schema associated with this data model file will be downloaded from GitHub and used to parse the data model.
:param wra_data_model: The filepath to an implementation of the WRA Data Model as a .json file or
a json formatted string or
a dictionary format of the data model.
:type wra_data_model: str or dict
:return: A simplified object to represent the data model
:rtype: MeasurementStation
"""
def __init__(self, wra_data_model):
self.__data_model = self._load_wra_data_model(wra_data_model)
version = self.__data_model.get('version')
self.__schema = self._get_schema(version=version)
self.__header = _Header(dm=self.__data_model, schema=self.__schema)
self.__meas_loc_data_model = self._get_meas_loc_data_model(dm=self.__data_model)
self.__meas_loc_properties = self.__get_properties()
self.__logger_configs = _LoggerConfigs(meas_loc_dm=self.__meas_loc_data_model,
schema=self.__schema, station_type=self.type)
self.__measurements = _Measurements(meas_loc_dm=self.__meas_loc_data_model, schema=self.__schema)
# self.__mast_section_geometry = _MastSectionGeometry()
def __getitem__(self, item):
return self.__meas_loc_properties[item]
def __iter__(self):
return iter(self.__meas_loc_properties)
def __repr__(self):
return repr(self.__meas_loc_properties)
@staticmethod
def _load_wra_data_model(wra_data_model):
"""
Load a IEA Wind Resource Assessment Data Model.
The IEA Wind: Task 43 Work Package 4 WRA Data Model was first released in January 2021. Versions of the
Data Model Schema can be found at https://github.com/IEA-Task-43/digital_wra_data_standard
*** SHOULD INCLUDE CHECKING AGAINST THE JSON SCHEMA (WHICH WOULD MEAN GETTING THE CORRECT VERSION FROM GITHUB)
AND MAKE SURE PROPER JSON
:param wra_data_model: The filepath to an implementation of the WRA Data Model as a .json file or
a json formatted string or
a dictionary format of the data model.
:return: Python dictionary of the data model.
:rtype: dict
"""
# Assess whether filepath or json str sent.
dm = dict()
if isinstance(wra_data_model, str) and '.json' == wra_data_model[-5:]:
if _is_file(wra_data_model):
with open(wra_data_model) as json_file:
dm = json.load(json_file)
elif isinstance(wra_data_model, str):
dm = json.loads(wra_data_model)
else:
# it is most likely already a dict so return itself
dm = wra_data_model
return dm
@staticmethod
def _get_schema(version):
"""
Get the JSON Schema from GitHub based on the version number in the data model.
:param version: The version from the header information from the data model json file.
:type version: str
:return: The IEA Wind Task 43 WRA Data Model Schema.
:rtype: dict
"""
schema_link = 'https://github.com/IEA-Task-43/digital_wra_data_standard/releases/download/v{}' \
'/iea43_wra_data_model.schema.json'
response = requests.get(schema_link.format(version))
if response.status_code == 404:
raise ValueError('Schema could not be downloaded from GitHub. Please check the version number in the '
'data model json file.')
schema = json.loads(response.content)
return schema
@staticmethod
def _get_meas_loc_data_model(dm):
if len(dm.get('measurement_location')) > 1:
raise Exception('More than one measurement location found in the data model. Only one '
'measurement location is supported. Please remove the extra measurement locations.')
return dm.get('measurement_location')[0]
@property
def data_model(self):
"""
The data model from the measurement_location onwards i.e. excluding the header.
:return: The measurement location data model as a dictionary.
"""
return self.__meas_loc_data_model
@property
def schema(self):
return self.__schema
@property
def name(self):
return self.__meas_loc_data_model.get('name')
@property
def lat(self):
return self.__meas_loc_data_model.get('latitude_ddeg')
@property
def long(self):
return self.__meas_loc_data_model.get('longitude_ddeg')
@property
def type(self):
return self.__meas_loc_data_model.get('measurement_station_type_id')
def __get_properties(self):
meas_loc_prop = []
if self.type == 'mast':
meas_loc_prop = _flatten_dict(self.__meas_loc_data_model, property_to_bring_up='mast_properties')
elif self.type in ['lidar', 'sodar', 'flidar']:
meas_loc_prop = _flatten_dict(self.__meas_loc_data_model,
property_to_bring_up='vertical_profiler_properties')
return meas_loc_prop
def get_table(self, horizontal_table_orientation=False):
"""
Get a table representation of the attributes for the measurement station and its mast or vertical profiler
properties.
:param horizontal_table_orientation: horizontal or vertical table orientation.
:type horizontal_table_orientation: bool
:return: A table showing all the information for the measurement station. If a
horizontal table then a pd.DataFrame is returned. If a vertical table
then a styled pd.DataFrame is returned which does not have the same
properties as a standard DataFrame.
:rtype: pd.DataFrame or pd.io.formats.style.Styler
"""
list_for_df = self.__meas_loc_properties
df = pd.DataFrame()
if horizontal_table_orientation:
list_for_df_with_titles = []
if isinstance(list_for_df, dict):
list_for_df_with_titles = [_rename_to_title(list_or_dict=list_for_df, schema=self.__schema)]
elif isinstance(list_for_df, list):
for row in list_for_df:
list_for_df_with_titles.append(_rename_to_title(list_or_dict=row, schema=self.__schema))
df = pd.DataFrame(list_for_df_with_titles, columns=_extract_keys_to_unique_list(list_for_df_with_titles))
df.set_index('Name', inplace=True)
elif horizontal_table_orientation is False:
if isinstance(list_for_df, dict):
# if a dictionary, it only has 1 row of data
titles = list(_rename_to_title(list_or_dict=list_for_df, schema=self.__schema).keys())
df = pd.DataFrame({1: list(list_for_df.values())}, index=titles)
elif isinstance(list_for_df, list):
for idx, row in enumerate(list_for_df):
titles = list(_rename_to_title(list_or_dict=row, schema=self.__schema).keys())
df_temp = pd.DataFrame({idx + 1: list(row.values())}, index=titles)
df = pd.concat([df, df_temp], axis=1, sort=False)
df = df.style.set_properties(**{'text-align': 'left'})
df = df.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
return df
@property
def properties(self):
return self.__meas_loc_properties
@property
def header(self):
# return the header info
return self.__header
@property
def logger_configs(self):
return self.__logger_configs
@property
def measurements(self):
return self.__measurements
@property
def mast_section_geometry(self):
return 'Not yet implemented.'
# return self.__mast_section_geometry
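# Minimal usage sketch (not part of the original module; the filename below is illustrative
# only and is assumed to exist locally as a WRA data model json file).
def _example_measurement_station_usage(data_model_file='demo_wra_data_model.json'):
    mm1 = MeasurementStation(data_model_file)
    print(mm1.name, mm1.lat, mm1.long, mm1.type)
    return mm1.measurements.get_table(detailed=True)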
class _Header:
def __init__(self, dm, schema):
"""
Extract the header info from the data model and return either a dict or table
"""
self._schema = schema
keys = []
values = []
header_dict = {}
for key, value in dm.items():
if key != 'measurement_location':
keys.append(key)
values.append(value)
header_dict[key] = value
self._header_properties = header_dict
self._keys = keys
self._values = values
def __getitem__(self, item):
return self._header_properties[item]
def __iter__(self):
return iter(self._header_properties)
def __repr__(self):
return repr(self._header_properties)
@property
def properties(self):
return self._header_properties
def get_table(self):
# get titles for each property
titles = []
for key in self._keys:
titles.append(_get_title(key, self._schema))
df = pd.DataFrame({'': self._values}, index=titles)
df_styled = df.style.set_properties(**{'text-align': 'left'})
df_styled = df_styled.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
return df_styled
class _LoggerConfigs:
def __init__(self, meas_loc_dm, schema, station_type):
self._log_cfg_data_model = meas_loc_dm.get('logger_main_config')
self._schema = schema
self._type = station_type
self.__log_cfg_properties = self.__get_properties()
def __getitem__(self, item):
return self.__log_cfg_properties[item]
def __iter__(self):
return iter(self.__log_cfg_properties)
def __repr__(self):
return repr(self.__log_cfg_properties)
@property
def data_model(self):
"""
This is the original data model unchanged from this level down.
:return: The data model from this level down.
:rtype: Dict or List
"""
return self._log_cfg_data_model
def __get_properties(self):
log_cfg_props = []
if self._type == 'mast':
# if mast, there are no child dictionaries
log_cfg_props = self._log_cfg_data_model # logger config data model is already a list
elif self._type in ['lidar', 'flidar']:
for log_config in self._log_cfg_data_model:
log_configs_flat = _flatten_dict(log_config, property_to_bring_up='lidar_config')
for log_config_flat in log_configs_flat:
log_cfg_props.append(log_config_flat)
return log_cfg_props
def get_table(self, horizontal_table_orientation=False):
"""
Get a table representation of the attributes for the logger configurations.
If a LiDAR then the lidar specific configurations are also presented.
:param horizontal_table_orientation: horizontal or vertical table orientation.
:type horizontal_table_orientation: bool
:return: A table showing all the information for the measurement station. If a
horizontal table then a pd.DataFrame is returned. If a vertical table
then a styled pd.DataFrame is returned which does not have the same
properties as a standard DataFrame.
:rtype: pd.DataFrame or pd.io.formats.style.Styler
"""
list_for_df = self.__log_cfg_properties
df = pd.DataFrame()
if horizontal_table_orientation:
list_for_df_with_titles = []
for row in list_for_df:
list_for_df_with_titles.append(_rename_to_title(list_or_dict=row, schema=self._schema))
df = pd.DataFrame(list_for_df_with_titles, columns=_extract_keys_to_unique_list(list_for_df_with_titles))
df.set_index('Logger Name', inplace=True)
elif horizontal_table_orientation is False:
for idx, row in enumerate(list_for_df):
titles = list(_rename_to_title(list_or_dict=row, schema=self._schema).keys())
df_temp = pd.DataFrame({idx + 1: list(row.values())}, index=titles)
df = pd.concat([df, df_temp], axis=1, sort=False)
df = df.style.set_properties(**{'text-align': 'left'})
df = df.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
return df
@property
def properties(self):
return self.__log_cfg_properties
class _Measurements:
def __init__(self, meas_loc_dm, schema):
# for meas_loc in dm['measurement_location']:
self._meas_data_model = meas_loc_dm.get('measurement_point')
self._schema = schema
self.__meas_properties = self.__get_properties()
self.__meas_dict = self.__get_properties_as_dict()
# Making _Measurements emulate a dictionary.
# Not using super(_Measurements, self).__init__(*arg, **kw) as I do not want the user to __setitem__,
# __delitem__, clear, update or pop. Therefore, writing out the specific behaviour I want for the dictionary.
def __getitem__(self, key):
return self.__meas_dict[key]
def __iter__(self):
return iter(self.__meas_dict)
def __repr__(self):
return repr(self.__meas_dict)
def __len__(self):
return len(self.__meas_dict)
def __contains__(self, key):
return key in self.__meas_dict
# Don't provide copy(): users should use copy.deepcopy to copy the dictionary, and a copy() method might also be confused with copying the object itself.
# def copy(self):
# return self.__meas_dict.copy()
def keys(self):
return self.__meas_dict.keys()
def values(self):
return self.__meas_dict.values()
def items(self):
return self.__meas_dict.items()
@property
def data_model(self):
return self._meas_data_model
def __get_parent_properties(self):
meas_props = []
for meas_point in self._meas_data_model:
meas_props.append(_filter_parent_level(meas_point))
return meas_props
@property
def properties(self):
return self.__meas_properties
@property
def names(self):
"""
The names of all the measurements.
:return: The list of names.
:rtype: list(str)
"""
return self.__get_names()
@property
def wspds(self):
return self.__get_properties_as_dict(measurement_type_id='wind_speed')
@property
def wspd_names(self):
return self.__get_names(measurement_type_id='wind_speed')
@property
def wspd_heights(self):
return self.get_heights(measurement_type_id='wind_speed')
@property
def wdirs(self):
return self.__get_properties_as_dict(measurement_type_id='wind_direction')
@property
def wdir_names(self):
return self.__get_names(measurement_type_id='wind_direction')
@property
def wdir_heights(self):
return self.get_heights(measurement_type_id='wind_direction')
@staticmethod
def __meas_point_merge(sensor_cfgs, sensors=None, mount_arrgmts=None):
"""
Merge the properties from sensor_cfgs, sensors and mounting_arrangements. This will account for when
each property was changed over time.
:param sensor_cfgs: Sensor cfgs properties
:type sensor_cfgs: list
:param sensors: Sensor properties
:type sensors: list
:param mount_arrgmts: Mounting arrangement properties
:type mount_arrgmts: list
:return: The properties merged together.
:rtype: list(dict)
"""
sensor_cfgs = _replace_none_date(sensor_cfgs)
sensors = _replace_none_date(sensors)
mount_arrgmts = _replace_none_date(mount_arrgmts)
date_from = [sen_config.get('date_from') for sen_config in sensor_cfgs]
date_to = [sen_config.get('date_to') for sen_config in sensor_cfgs]
if sensors is not None:
for sensor in sensors:
date_from.append(sensor.get('date_from'))
date_to.append(sensor.get('date_to'))
if mount_arrgmts is not None:
for mount_arrgmt in mount_arrgmts:
date_from.append(mount_arrgmt['date_from'])
date_to.append(mount_arrgmt['date_to'])
date_from.extend(date_to)
dates = list(set(date_from))
dates.sort()
meas_points_merged = []
for i in range(len(dates) - 1):
good_sen_config = {}
for sen_config in sensor_cfgs:
if (sen_config['date_from'] <= dates[i]) & (sen_config.get('date_to') > dates[i]):
good_sen_config = sen_config.copy()
if good_sen_config != {}:
if sensors is not None:
for sensor in sensors:
if (sensor['date_from'] <= dates[i]) & (sensor['date_to'] > dates[i]):
good_sen_config.update(sensor)
if mount_arrgmts is not None:
for mount_arrgmt in mount_arrgmts:
if (mount_arrgmt['date_from'] <= dates[i]) & (mount_arrgmt['date_to'] > dates[i]):
good_sen_config.update(mount_arrgmt)
good_sen_config['date_to'] = dates[i + 1]
good_sen_config['date_from'] = dates[i]
meas_points_merged.append(good_sen_config)
# replace 'date_to' if equals to 'DATE_INSTEAD_OF_NONE'
for meas_point in meas_points_merged:
if meas_point.get('date_to') is not None and meas_point.get('date_to') == DATE_INSTEAD_OF_NONE:
meas_point['date_to'] = None
return meas_points_merged
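# Illustrative sketch (hypothetical dates and values, not from the original module) of how
# __meas_point_merge splits overlapping validity periods:
#   sensor_cfgs = [{'date_from': '2020-01-01', 'date_to': '2100-12-31', 'slope': 0.046}]
#   sensors     = [{'date_from': '2020-06-01', 'date_to': '2100-12-31', 'serial_number': 'A1'}]
# The unique sorted dates are ['2020-01-01', '2020-06-01', '2100-12-31'], giving two merged
# periods: 2020-01-01 to 2020-06-01 (sensor config only) and 2020-06-01 onwards (sensor config
# plus sensor), with the 2100-12-31 placeholder date mapped back to None in the result.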
def __get_properties(self):
meas_props = []
for meas_point in self._meas_data_model:
# col_names_raised = _raise_child(meas_point, child_to_raise='column_name')
# sen_cfgs = _raise_child(col_names_raised, child_to_raise='sensor_config')
sen_cfgs = _raise_child(meas_point, child_to_raise='sensor_config')
calib_raised = _raise_child(meas_point, child_to_raise='calibration')
if calib_raised is None:
sensors = _raise_child(meas_point, child_to_raise='sensor')
else:
sensors = _raise_child(calib_raised, child_to_raise='sensor')
mounting_arrangements = _raise_child(meas_point, child_to_raise='mounting_arrangement')
if mounting_arrangements is None:
meas_point_merged = self.__meas_point_merge(sensor_cfgs=sen_cfgs, sensors=sensors)
else:
meas_point_merged = self.__meas_point_merge(sensor_cfgs=sen_cfgs, sensors=sensors,
mount_arrgmts=mounting_arrangements)
for merged_meas_point in meas_point_merged:
meas_props.append(merged_meas_point)
return meas_props
def __get_properties_by_type(self, measurement_type_id):
merged_properties = copy.deepcopy(self.__meas_properties)
meas_list = []
for meas_point in merged_properties:
meas_type = meas_point.get('measurement_type_id')
if meas_type is not None and meas_type == measurement_type_id:
meas_list.append(meas_point)
return meas_list
def __get_properties_as_dict(self, measurement_type_id=None):
"""
Get the flattened properties as a dictionary with name as the key. This is for easy use for accessing a
measurement point.
e.g. mm1.measurements['Spd1']
:return: Flattened properties as a dictionary
:rtype: dict
"""
meas_dict = {}
merged_properties = copy.deepcopy(self.__meas_properties)
for meas_point in merged_properties:
meas_point_name = meas_point['name']
if meas_point['measurement_type_id'] == measurement_type_id or measurement_type_id is None:
if meas_point_name in meas_dict.keys():
meas_dict[meas_point_name].append(meas_point)
else:
meas_dict[meas_point_name] = [meas_point]
return meas_dict
def __get_table_for_cols(self, columns_to_show):
"""
Get table of measurements for specific columns.
:param columns_to_show: Columns required to show in table.
:type columns_to_show: list(str)
:return: Table as a pandas DataFrame
:rtype: pd.DataFrame
"""
temp_df = pd.DataFrame(self.__meas_properties)
# select the common columns that are available
avail_cols = [col for col in columns_to_show if col in temp_df.columns]
if not avail_cols:
raise KeyError('No data to show from the list of columns provided')
# Drop all rows that have no data for the avail_cols
temp_df.dropna(axis=0, subset=avail_cols, how='all', inplace=True)
if temp_df.empty:
raise KeyError('No data to show from the list of columns provided')
# Name needs to be included in the grouping but 'date_from' and 'date_to' should not be
# as we filter for them later
required_in_avail_cols = {'include': ['name'], 'remove': ['date_from', 'date_to']}
for include_col in required_in_avail_cols['include']:
if include_col not in avail_cols:
avail_cols.insert(0, include_col)
for remove_col in required_in_avail_cols['remove']:
if remove_col in avail_cols:
avail_cols.remove(remove_col)
# Remove duplicates resulting from other info being dropped.
temp_df.sort_values(['name', 'date_from'], ascending=[True, True], inplace=True)
temp_df.fillna('-', inplace=True) # groupby drops nan so need to fill them in
# group duplicate data for the columns available
grouped_by_avail_cols = temp_df.groupby(avail_cols)
# get date_to from the last row in each group to assign to the first row.
new_date_to = grouped_by_avail_cols.last()['date_to']
df = grouped_by_avail_cols.first()[['date_from', 'date_to']]
df['date_to'] = new_date_to
df.reset_index(level=avail_cols, inplace=True)
df.sort_values(['name', 'date_from'], ascending=[True, True], inplace=True)
# get titles
title_cols = _rename_to_title(list_or_dict=list(df.columns), schema=self._schema)
df.columns = title_cols
df.set_index('Name', inplace=True)
df.replace(DATE_INSTEAD_OF_NONE, '-', inplace=True)
return df
def get_table(self, detailed=False, wind_speeds=False, wind_directions=False, calibrations=False,
mounting_arrangements=False, columns_to_show=None):
"""
Get tables to show information about the measurements made.
:param detailed: For a more detailed table that includes how the sensor is programmed into the logger,
information about the sensor itself and how it is mounted on the mast, if applicable.
:type detailed: bool
:param wind_speeds: Wind speed specific details.
:type wind_speeds: bool
:param wind_directions: Wind direction specific details.
:type wind_directions: bool
:param calibrations: Calibration specific details.
:type calibrations: bool
:param mounting_arrangements: Mounting arrangement specific details.
:type mounting_arrangements: bool
:param columns_to_show: Optionally provide a list of column names you want to see in a table. This list
should be pulled from the list of keys available in the measurements.properties.
'name', 'date_from' and 'date_to' are always inserted so no need to include them
in your list.
:type columns_to_show: list(str) or None
:return: A table showing information about the measurements made by this measurement station.
:rtype: pd.DataFrame
**Example usage**
::
import brightwind as bw
mm1 = bw.MeasurementStation(bw.demo_datasets.demo_wra_data_model)
mm1.measurements.get_table()
To get a more detailed table::
mm1.measurements.get_table(detailed=True)
To get wind speed specific details::
mm1.measurements.get_table(wind_speeds=True)
To get wind direction specific details::
mm1.measurements.get_table(wind_directions=True)
To get calibration specific details::
mm1.measurements.get_table(calibrations=True)
To get mounting specific details::
mm1.measurements.get_table(mounting_arrangements=True)
To make your own table::
columns = ['calibration.slope', 'calibration.offset', 'calibration.report_file_name', 'date_of_calibration']
mm1.measurements.get_table(columns_to_show=columns)
"""
df = pd.DataFrame()
if detailed is False and wind_speeds is False and wind_directions is False \
and calibrations is False and mounting_arrangements is False and columns_to_show is None:
# default summary table
list_for_df = self.__get_parent_properties()
list_for_df_with_titles = []
for row in list_for_df:
list_for_df_with_titles.append(_rename_to_title(list_or_dict=row, schema=self._schema))
df = pd.DataFrame(list_for_df_with_titles, columns=_extract_keys_to_unique_list(list_for_df_with_titles))
# order rows
order_index = dict(zip(MEAS_TYPE_ORDER, range(len(MEAS_TYPE_ORDER))))
df['meas_type_rank'] = df['Measurement Type'].map(order_index)
df.sort_values(['meas_type_rank', 'Height [m]'], ascending=[True, False], inplace=True)
df.drop('meas_type_rank', axis=1, inplace=True)
df.set_index('Name', inplace=True)
df.fillna('-', inplace=True)
elif detailed is True:
cols_required = ['name', 'oem', 'model', 'sensor_type_id', 'sensor.serial_number',
'height_m', 'boom_orientation_deg',
'date_from', 'date_to', 'connection_channel', 'measurement_units_id',
'sensor_config.slope', 'sensor_config.offset', 'calibration.slope', 'calibration.offset',
'sensor_config.notes', 'sensor.notes']
df = pd.DataFrame(self.__meas_properties)
# get what is common from both lists and use this to filter df
cols_required = [col for col in cols_required if col in df.columns]
df = df[cols_required]
# order rows
if 'sensor_type_id' in df.columns:
order_index = dict(zip(SENSOR_TYPE_ORDER, range(len(SENSOR_TYPE_ORDER))))
df['sensor_rank'] = df['sensor_type_id'].map(order_index)
df.sort_values(['sensor_rank', 'height_m'], ascending=[True, False], inplace=True)
df.drop('sensor_rank', axis=1, inplace=True)
else:
df.sort_values(['name', 'height_m'], ascending=[True, False], inplace=True)
# get titles
title_cols = _rename_to_title(list_or_dict=list(df.columns), schema=self._schema)
df.columns = title_cols
# tidy up
df.set_index('Name', inplace=True)
df.fillna('-', inplace=True)
df.replace(DATE_INSTEAD_OF_NONE, '-', inplace=True)
elif wind_speeds is True:
cols_required = ['name', 'measurement_type_id', 'oem', 'model', 'sensor.serial_number', 'is_heated',
'height_m', 'boom_orientation_deg', 'mounting_type_id',
'date_from', 'date_to', 'connection_channel',
'sensor_config.slope', 'sensor_config.offset', 'calibration.slope', 'calibration.offset',
'sensor_config.notes', 'sensor.notes']
df = pd.DataFrame(self.__meas_properties)
import pandas as pd
conversiones=pd.read_csv("conversiones.csv", sep=";")
navegacion=pd.read_csv("navegacion.csv", sep=";")
nav=pd.DataFrame(navegacion)
conv=pd.DataFrame(conversiones)
# Web Spider for AFL Data
# MIT LICENSE
# Author: Z.WANG
# Date: 07/Apr/2022
# import libraries
import pandas as pd
import requests
from fake_useragent import UserAgent
from time import sleep
import random
from bs4 import BeautifulSoup
import re
import warnings
from tqdm import tqdm
# Suppresses scientific notation
pd.set_option('display.float_format', lambda x: '%.2f' % x)
# display the full tables
pd.set_option('display.max_columns', None)
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from collections import defaultdict
import cupy
import numpy as np
import pandas as pd
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.dlpack import from_dlpack
from transformers import (
BertForTokenClassification,
DistilBertForTokenClassification,
ElectraForTokenClassification
)
log = logging.getLogger(__name__)
ARCH_MAPPING = {
'BertForTokenClassification': BertForTokenClassification,
'DistilBertForTokenClassification': DistilBertForTokenClassification,
'ElectraForTokenClassification': ElectraForTokenClassification}
MODEL_MAPPING = {
'BertForTokenClassification': 'bert-base-cased',
'DistilBertForTokenClassification': 'distilbert-base-cased',
'ElectraForTokenClassification': 'rapids/electra-small-discriminator'}
class Cybert:
"""
Cyber log parsing using BERT, DistilBERT, or ELECTRA. This class provides methods
for loading models, prediction, and postprocessing.
"""
def __init__(self):
self._model = None
self._label_map = {}
resources_dir = "%s/resources" % os.path.dirname(os.path.realpath(__file__))
vocabpath = "%s/bert-base-cased-vocab.txt" % resources_dir
self._vocab_lookup = {}
with open(vocabpath) as f:
for index, line in enumerate(f):
self._vocab_lookup[index] = line.split()[0]
self._hashpath = "%s/bert-base-cased-hash.txt" % resources_dir
def load_model(self, model_filepath, config_filepath):
"""
Load cybert model.
:param model_filepath: Filepath of the model (.pth or .bin) to
be loaded
:type model_filepath: str
:param config_filepath: Config file (.json) containing the model architecture and label map to be
used
:type config_filepath: str
Examples
--------
>>> from clx.analytics.cybert import Cybert
>>> cyparse = Cybert()
>>> cyparse.load_model('/path/to/model.pth', '/path/to/config.json')
"""
with open(config_filepath) as f:
config = json.load(f)
model_arch = config["architectures"][0]
self._label_map = {int(k): v for k, v in config["id2label"].items()}
model_state_dict = torch.load(model_filepath)
self._model = ARCH_MAPPING[model_arch].from_pretrained(
MODEL_MAPPING[model_arch],
state_dict=model_state_dict,
num_labels=len(self._label_map),
)
self._model.cuda()
self._model.eval()
def preprocess(self, raw_data_col, stride_len=116, max_seq_len=128):
"""
Preprocess and tokenize data for cybert model inference.
:param raw_data_col: logs to be processed
:type raw_data_col: cudf.Series
:param stride_len: Max stride length for processing, default is 116
:type stride_len: int
:param max_seq_len: Max sequence length for processing, default is 128
:type max_seq_len: int
Examples
--------
>>> import cudf
>>> from clx.analytics.cybert import Cybert
>>> cyparse = Cybert()
>>> cyparse.load_model('/path/to/model.pth', '/path/to/config.json')
>>> raw_df = cudf.Series(['Log event 1', 'Log event 2'])
>>> input_ids, attention_masks, meta_data = cyparse.preprocess(raw_df)
"""
raw_data_col = raw_data_col.str.replace('"', "")
raw_data_col = raw_data_col.str.replace("\\r", " ")
raw_data_col = raw_data_col.str.replace("\\t", " ")
raw_data_col = raw_data_col.str.replace("=", "= ")
raw_data_col = raw_data_col.str.replace("\\n", " ")
byte_count = raw_data_col.str.byte_count()
max_num_chars = byte_count.sum()
max_rows_tensor = int((byte_count / 120).ceil().sum())
input_ids, att_mask, meta_data = raw_data_col.str.subword_tokenize(
self._hashpath,
max_seq_len,
stride_len,
max_num_strings=len(raw_data_col),
max_num_chars=max_num_chars,
max_rows_tensor=max_rows_tensor,
do_lower=False,
do_truncate=False,
)
num_rows = int(len(input_ids) / max_seq_len)
input_ids = from_dlpack(
(input_ids.reshape(num_rows, max_seq_len).astype(cupy.float)).toDlpack()
)
att_mask = from_dlpack(
(att_mask.reshape(num_rows, max_seq_len).astype(cupy.float)).toDlpack()
)
meta_data = meta_data.reshape(num_rows, 3)
return input_ids.type(torch.long), att_mask.type(torch.long), meta_data
def inference(self, raw_data_col, batch_size=160):
"""
Cybert inference and postprocessing on dataset
:param raw_data_col: logs to be processed
:type raw_data_col: cudf.Series
:param batch_size: Log data is processed in batches using a Pytorch dataloader.
The batch size parameter refers to the batch size indicated in torch.utils.data.DataLoader.
:type batch_size: int
:return: parsed_df
:rtype: pandas.DataFrame
:return: confidence_df
:rtype: pandas.DataFrame
Examples
--------
>>> import cudf
>>> from clx.analytics.cybert import Cybert
>>> cyparse = Cybert()
>>> cyparse.load_model('/path/to/model.pth', '/path/to/config.json')
>>> raw_data_col = cudf.Series(['Log event 1', 'Log event 2'])
>>> processed_df, confidence_df = cy.inference(raw_data_col)
"""
input_ids, attention_masks, meta_data = self.preprocess(raw_data_col)
dataset = TensorDataset(input_ids, attention_masks)
dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size)
confidences_list = []
labels_list = []
for step, batch in enumerate(dataloader):
in_ids, att_masks = batch
with torch.no_grad():
logits = self._model(in_ids, att_masks)[0]
logits = F.softmax(logits, dim=2)
confidences, labels = torch.max(logits, 2)
confidences_list.extend(confidences.detach().cpu().numpy().tolist())
labels_list.extend(labels.detach().cpu().numpy().tolist())
infer_pdf = pd.DataFrame(meta_data)
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
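# Illustrative sketch (not part of the original test suite) of how the fixture tuple is
# typically unpacked inside a cummin/cummax test; the data used here is hypothetical.
def _example_use_of_minmax_fixture(numpy_dtypes_for_minmax):
    dtype, min_val, max_val = numpy_dtypes_for_minmax
    df = DataFrame({"a": [1, 1, 2], "b": [max_val, min_val, min_val]}, dtype=dtype)
    result = df.groupby("a").b.cummin()
    expected = Series([max_val, min_val, min_val], dtype=dtype, name="b")
    tm.assert_series_equal(result, expected)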
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
import pandas as pd
from unittest2 import TestCase # or `from unittest import ...` if on Python 3.4+
import numpy as np
import category_encoders.tests.helpers as th
import category_encoders as encoders
np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)
class TestOneHotEncoderTestCase(TestCase):
def test_one_hot(self):
enc = encoders.OneHotEncoder(verbose=1, return_df=False)
enc.fit(X)
self.assertEqual(enc.transform(X_t).shape[1],
enc.transform(X).shape[1],
'We have to get the same count of columns despite the presence of a new value')
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator')
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan')
enc.fit(X)
out = enc.transform(X_t)
self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error')
# The exception is already raised in fit() because transform() is called there to get
# feature_names right.
enc.fit(X)
with self.assertRaises(ValueError):
enc.transform(X_t)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_A', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator')
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
# test inverse_transform
X_i = th.create_dataset(n_rows=100, has_none=False)
X_i_t = th.create_dataset(n_rows=50, has_none=False)
cols = ['underscore', 'none', 'extra', 321, 'categorical']
enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols)
enc.fit(X_i)
obtained = enc.inverse_transform(enc.transform(X_i_t))
th.verify_inverse_transform(X_i_t, obtained)
def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self):
encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator')
result = encoder.fit_transform([[-1]])
self.assertListEqual([[1, 0]], result.get_values().tolist())
def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inverse_transformed = encoder.inverse_transform(transformed)
assert value.equals(inverse_transformed)
def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inverse_transformed = encoder.inverse_transform(transformed)
assert value.equals(inverse_transformed)
def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator')
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')})
result = encoder.fit_transform(value)
columns = result.columns.tolist()
self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns))
def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
test = pd.DataFrame({'city': ['Chicago', 'Detroit']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 0]},
columns=['city_1', 'city_2'])
enc = encoders.OneHotEncoder(handle_unknown='value')
result = enc.fit(train).transform(test)
pd.testing.assert_frame_equal(expected_result, result)
def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 1]},
columns=['city_1', 'city_2'])
enc = encoders.OneHotEncoder(handle_unknown='value')
result = enc.fit(train).transform(train)
pd.testing.assert_frame_equal(expected_result, result)
def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 1],
'city_-1': [0, 0]},
columns=['city_1', 'city_2', 'city_-1'])
enc = encoders.OneHotEncoder(handle_unknown='indicator')
result = enc.fit(train).transform(train)
pd.testing.assert_frame_equal(expected_result, result)
def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
test = pd.DataFrame({'city': ['Chicago', 'Detroit']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 0],
'city_-1': [0, 1]},
columns=['city_1', 'city_2', 'city_-1'])
enc = encoders.OneHotEncoder(handle_unknown='indicator')
result = enc.fit(train).transform(test)
pd.testing.assert_frame_equal(expected_result, result)
def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self):
train = ['A', 'B', np.nan]
encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value')
result = encoder.fit_transform(train)
expected = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self):
train = ['A', 'B']
encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value')
result = encoder.fit_transform(train)
expected = [[1, 0, 0],
[0, 1, 0]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self):
train = ['A', 'B']
test = ['A', 'B', np.nan]
encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value')
encoder.fit(train)
result = encoder.transform(test)
expected = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self):
train = ['A', 'B']
test = ['A', 'B', 'C']
encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value')
encoder.fit(train)
result = encoder.transform(test)
expected = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self):
train = ['A', 'B']
encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value')
result = encoder.fit_transform(train)
expected = [[1, 0, 0],
[0, 1, 0]]
self.assertEqual(result.values.tolist(), expected)
def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self):
train = pd.DataFrame({'city': ['chicago', np.nan]})
import pandas as pd
from sklearn.metrics import confusion_matrix
import sklearn.linear_model as lm
clf=lm.LogisticRegression(C=10.0)
grades=pd.read_table("grades.csv")
import pandas as pd
import numpy as np
from functions import fao_regions as regions
data = 'data/'
def data_build(crop_proxie, diet_div_crop, diet_source_crop, diet_ls_only, diet_ls_only_source, min_waste):
"""*** Import of country data to build national diets ***"""
WPR_height = pd.read_csv(r"data/worldpopulationreview_height_data.csv")
WPR_height.loc[WPR_height.Area == "North Korea", "Area"] = "Democratic People's Republic of Korea"
Countrycodes = pd.read_csv(r"data/countrycodes.csv", sep = ";")
#FAO_pop = pd.read_excel(data+"/FAOSTAT_Population_v3.xlsx")
FAO_pop = pd.read_excel(data+"/FAOSTAT_2018_population.xlsx")
FAO_pop.loc[FAO_pop.Area == "Cote d'Ivoire", "Area"] = "Côte d'Ivoire"
FAO_pop.loc[FAO_pop.Area == "French Guyana", "Area"] = "French Guiana"
FAO_pop.loc[FAO_pop.Area == "Réunion", "Area"] = "Réunion"
"""*** Import and sorting of data ***"""
FAO_crops = pd.read_csv(data+"/FAOSTAT_crop Production.csv")
FAO_crops["group"] = FAO_crops.apply(lambda x: regions.group(x["Item Code"]), axis=1)
FAO_crops = FAO_crops.rename(columns={"Value" : "Production"})
FAO_crops["Production"] = FAO_crops["Production"] / 1000
FAO_crops["Unit"] = "1000 tonnes"
FAO_animals = pd.read_csv(data+"/FAO_animal_prod_2016.csv")
FAO_animals["group"] = FAO_animals.apply(lambda x: regions.group(x["Item Code"]), axis=1)
FAO_animals.loc[FAO_animals.Area == "United Kingdom of Great Britain and Northern Ireland", "Area"] = "United Kingdom"
FAO_animals = FAO_animals.rename(columns={"Value" : "Production"})
FAO_animals.drop(FAO_animals[FAO_animals.Unit != 'tonnes'].index, inplace = True)
FAO_animals["Production"] = FAO_animals["Production"] / 1000
FAO_animals["Unit"] = "1000 tonnes"
FAO_animals_5 = pd.read_csv(data+"/FAOSTAT_animal_prod_5.csv")
FAO_animals_5["group"] = FAO_animals_5.apply(lambda x: regions.group(x["Item Code (FAO)"]), axis=1)
FAO_animals_5.loc[FAO_animals_5.Area == "United Kingdom of Great Britain and Northern Ireland", "Area"] = "United Kingdom"
FAO_animals_5 = FAO_animals_5.rename(columns={"Value" : "Production"})
FAO_animals_5.drop(FAO_animals_5[FAO_animals_5.Unit != 'tonnes'].index, inplace = True)
FAO_animals_5["Production"] = FAO_animals_5["Production"] / 1000
FAO_animals_5["Unit"] = "1000 tonnes"
FAO_animals_5 = FAO_animals_5.groupby(['Area', 'Item']).mean().reset_index()
FAO_animals = pd.merge(FAO_animals, FAO_animals_5[['Area', 'Item', 'Production']], on = ["Area", "Item"], how = 'left')
FAO_animals["Production"] = FAO_animals["Production_y"]
FAO_animals = FAO_animals.drop(columns = ["Production_x", "Production_y"])
FAO_fish = pd.read_csv(data+"FAOSTAT_Fish.csv")
FAO_fish = FAO_fish.rename(columns={"Value" : "Production"})
FAO_fish["group"] = FAO_fish.apply(lambda x: regions.group(x["Item Code"]), axis=1)
meat_products = ["eggs", "beef and lamb", "chicken and other poultry",\
"pork", "whole milk or derivative equivalents"]
fish_products = ["Freshwater Fish", "Demersal Fish", "Pelagic Fish",\
"Marine Fish, Other", "Crustaceans", "Cephalopods",\
"Molluscs, Other", "Meat, Aquatic Mammals", "Aquatic Animals, Others",
"Aquatic Plants", "Fish, Body Oil", "Fish, Liver Oil"]
other_items = ["Honey, natural", "Beeswax", "Silk-worm cocoons, reelable"]
other_items = ["Beeswax", "Silk-worm cocoons, reelable"]
"""*** Import of protein data ***"""
FAO_Protein = pd.read_csv(data+"protein.csv")
FAO_Protein["group"] = FAO_Protein["group"].str.replace("dairy", "whole milk or derivative equivalents")
FAO_Protein = FAO_Protein.rename(columns = {"Country": "Area"})
"""*** Build main dataframe ***"""
POM_data = pd.concat([FAO_crops])
FAO_pop_temp = FAO_pop.set_index("Area")
if crop_proxie == True:
for i,j in zip(diet_ls_only, diet_ls_only_source):
fao_fix = POM_data.loc[POM_data.Area == j]
fao_fix['Area'] = i
factor = FAO_pop_temp['Value'][i] / FAO_pop_temp['Value'][j]
fao_fix['Production'] *= factor
POM_data = POM_data[POM_data.Area != i]
POM_data = pd.concat([POM_data,fao_fix])
POM_data = pd.concat([POM_data, FAO_animals, FAO_fish])
if crop_proxie == True:
for i,j in zip (diet_div_crop, diet_source_crop):
fao_fix = POM_data.loc[POM_data.Area == j]
fao_fix['Area'] = i
factor = FAO_pop_temp['Value'][i] / FAO_pop_temp['Value'][j]
fao_fix['Production'] *= factor
POM_data = POM_data[POM_data.Area != i]
POM_data = pd.concat([POM_data,fao_fix])
POM_data = POM_data.reset_index()
"""*** Food source scenarios ***"""
POM_data = POM_data.reset_index(drop = True)
POM_data = POM_data[~POM_data.Item.isin(other_items)]
POM_data = POM_data.rename(columns={"group" : "group_nf"})
POM_data["Production"].clip(lower = 0, inplace = True)
POM_data = pd.merge(POM_data, WPR_height, on = ["Area"], how = 'left')
POM_data = pd.merge(POM_data, Countrycodes, left_on = "Area", right_on = "COUNTRY", how = 'left')
POM_data = pd.merge(POM_data, FAO_pop[["Area", "Value"]], on = ["Area"], how = 'left')
POM_data = POM_data.rename(columns={"Value" : "Population (2016), 1000person"})
"""*** Fix China data from China to China, mainland to seperate out Taiwan ***"""
POM_data.loc[POM_data.Area == 'China, mainland', 'REGION'] = 'CHN'
POM_data.loc[POM_data.Area == 'China, mainland', 'CODE'] = '156'
temp_height = POM_data['avg height'][1761]
temp_weight = POM_data['avg weight'][1761]
POM_data.loc[POM_data.Area == 'China, mainland', 'avg height'] = temp_height
POM_data.loc[POM_data.Area == 'China, mainland', 'avg weight'] = temp_weight
POM_data.drop(POM_data[POM_data.Area == 'China'].index)
POM_data = POM_data.reset_index(drop = True)
""" Remove microstates """
FAO_land = pd.read_csv(data+"FAOSTAT_Ag Land.csv")
FAO_land = FAO_land.rename(columns={"Value" : "1000 Ha"})
POM_data = pd.merge(POM_data, FAO_land[["Area", "1000 Ha"]], on = ["Area"], how = 'left')
France_ot = ['French Guiana', 'Guadeloupe', 'Martinique', 'Réunion']
for overseas in France_ot:
x = POM_data.loc[POM_data.Area == overseas, 'Population (2016), 1000person'].unique()[0]
POM_data.loc[POM_data.Area == 'France', 'Population (2016), 1000person'] += x
y = POM_data.loc[POM_data.Area == overseas, '1000 Ha'].unique()[0]
POM_data.loc[POM_data.Area == 'France', '1000 Ha'] += y
POM_micro = POM_data.loc[(POM_data["1000 Ha"] <= 100)]
POM_micro = POM_micro[~POM_micro["Area"].isin(France_ot)]
POM_micro = POM_micro[['Area', '1000 Ha', 'Population (2016), 1000person']]
POM_micro = POM_micro.drop_duplicates()
POM_data = POM_data.loc[(POM_data["1000 Ha"] > 0)]
POM_data = POM_data[~POM_data["Area"].isin(France_ot)]
POM_data["GROUP"] = POM_data.apply(lambda row: regions.region(row), axis = 1)
POM_data["IMAGEGROUP"] = POM_data.apply(lambda row: regions.imageregion(row), axis = 1)
"""*** Calculate remaining body weights based on GROUP average ***"""
POM_Group_Data = POM_data.groupby(["GROUP"]).apply(lambda x: x["avg height"].mean())
for i in POM_data["avg height"].index:
if np.isnan(POM_data["avg height"][i]) == True:
POM_data["avg height"][i] = POM_Group_Data[POM_data["GROUP"][i]]
POM_data["avg weight"][i] = POM_data["avg height"][i]**2*22
POM_data = POM_data.drop(["cca2", "maleMetricHeight", "femaleMetricHeight", "Unnamed: 7", "Year",\
"COUNTRY","Area Code", "pop2019", "Domain", "Domain Code", "Element", "Flag", "Flag Description", "Year Code", "REGION", "CODE"], axis = 1 )
POM_data = POM_data.rename(columns={"Production" : "POM"})
"""*** Add in protein data ***"""
POM_data = pd.merge(POM_data, FAO_Protein[["Area", "g protein per g product", "Item", "Item Code"]], on = ["Area", "Item", "Item Code"], how = 'left')
POM_data = POM_data.rename(columns = {"g protein per g product": "% Protein" })
POM_data = POM_data.drop_duplicates()
POM_data = POM_data.drop(["Element Code"],axis =1)
POM_data ["% Protein"] = POM_data ["% Protein"]*100
"""*** Determine the % of groups production of each item based on POM ***"""
POM_data["group"] = POM_data.apply(lambda x: regions.group(x["Item Code"]), axis=1)
"""*** Remove the waste fraction to get a snapshot of what people eat ***"""
wastefractions = pd.read_csv(data+"waste_fractions.csv", sep = ";")
for i in wastefractions.index:
if wastefractions['Region'][i] == 'LatinAmerica':
wastefractions['Region'][i] = 'South&CentralAmerica'
groups = wastefractions.groupby(["Region"])
POM_data["POM no waste"] = POM_data["POM"]
for n, gr in POM_data.groupby("GROUP"):
if n == 'Other':
continue
POM_data.loc[(POM_data["GROUP"] == n) & (POM_data["group"] == "potatoes and cassava"), "POM no waste"] *= (1 - wastefractions.loc[(wastefractions["Region"] == n) & (wastefractions["Foodtype"] == "Rootsandtubers"), "Total"].values)
POM_data.loc[(POM_data["GROUP"] == n) & (POM_data["group"].isin(["dry beans lentils and peas", "soy foods", "peanuts", "tree nuts", "palm oil", "unsaturated oils", "all sweeteners"])), "POM no waste"] *= (1 - wastefractions.loc[(wastefractions["Region"] == n) & (wastefractions["Foodtype"] == "Oilseedsandpulses"), "Total"].values)
POM_data.loc[(POM_data["GROUP"] == n) & (POM_data["group"] == "rice wheat corn and other"), "POM no waste"] *= (1 - wastefractions.loc[(wastefractions["Region"] == n) & (wastefractions["Foodtype"] == "Cereals"), "Total"].values)
POM_data.loc[(POM_data["GROUP"] == n) & (POM_data["group"].isin(["all fruit", "all vegetables", "dark green vegetables", "red and orange vegetables"])), "POM no waste"] *= (1 - wastefractions.loc[(wastefractions["Region"] == n) & (wastefractions["Foodtype"] == "Fruitsandvegetables"), "Total"].values)
POM_data.loc[(POM_data["GROUP"] == n) & (POM_data["group"].isin(["eggs", "beef and lamb", "chicken and other poultry", "pork"])), "POM no waste"] *= (1 - wastefractions.loc[(wastefractions["Region"] == n) & (wastefractions["Foodtype"] == "Meat"), "Total"].values)
POM_data.loc[(POM_data["GROUP"] == n) & (POM_data["group"].isin(["whole milk or derivative equivalents"])), "POM no waste"] *= (1 - wastefractions.loc[(wastefractions["Region"] == n) & (wastefractions["Foodtype"] == "Milk"), "Total"].values)
POM_data.loc[(POM_data["GROUP"] == n) & (POM_data["group"].isin(["fish"])), "POM no waste"] *= (1 - wastefractions.loc[(wastefractions["Region"] == n) & (wastefractions["Foodtype"] == "Fishandseafood"), "Total"].values)
    extra_nations = ['Puerto Rico', 'Palestine', 'Greenland', 'Falkland Islands (Malvinas)',
                     'New Caledonia', 'China', 'China, Taiwan Province of']  # comma added so the two island names are separate entries
POM_data = POM_data[~POM_data['Area'].isin(extra_nations)]
"""*** Adjust waste fraction based on scenario ***"""
if min_waste == True:
for i in wastefractions:
if i in ['Foodtype', 'Region']:
continue
for j in wastefractions.Foodtype:
wastefractions.loc[wastefractions.Foodtype == j, i] = wastefractions[i].loc[wastefractions.Foodtype == j].min()
wastefractions['Total'] = wastefractions['Agricultural_production'] + wastefractions['Postharvest_handling_and_storage']\
+ wastefractions['Processing_and_packaging'] + wastefractions["Distribution"] + wastefractions["Consumption"]
return POM_data, meat_products, fish_products, FAO_animals, wastefractions, FAO_pop, POM_micro
def bau_diet_ratios(POM_data):
POM_data["POM with waste"] = POM_data["POM"]
POM_percent = POM_data.groupby(["group", "Area"]).apply(lambda x: x["POM no waste"]/x["POM no waste"].sum()*100)
POM_percent = POM_percent.reset_index(level = ["group", "Area"])
POM_percent = POM_percent.fillna(value = 0)
POM_percent = POM_percent.rename(columns = {"POM no waste": "POM (no waste) group %"})
POM_data = POM_data.merge(POM_percent["POM (no waste) group %"].to_frame(), left_index=True, right_index=True)
POM_percent = POM_data.groupby(["Area"]).apply(lambda x: (x["POM no waste"]/x["POM no waste"].sum())*100)
POM_percent = POM_percent.reset_index(level = ["Area"])
POM_percent = POM_percent.fillna(value = 0)
POM_data = POM_data.merge(POM_percent["POM no waste"].to_frame(), left_index=True, right_index=True)
POM_data = POM_data.rename(columns = {"POM no waste_x": "POM (no waste)", "POM no waste_y":"POM (no waste) total %"})
return POM_data
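# Illustration of the two share columns added above (hypothetical numbers): if a country produces
# 80 units of wheat and 20 units of rice within the "rice wheat corn and other" group, wheat gets
# "POM (no waste) group %" = 80, while its "POM (no waste) total %" is its share of everything the
# country produces across all food groups.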
def nutrition(POM_data):
Nutrition_values = pd.read_csv(r"data/nutritionvalues.csv", sep = ";")
Nutrition_values = Nutrition_values.rename(columns = {"type": "group"})
N_to_P_conversion = pd.read_csv(data+"FAOnitrogen_protein_conversion_factors.csv", sep = ";")
Nutrition_values["nitrogen(%)"] = np.where(Nutrition_values["item number"].eq(N_to_P_conversion["item number"]),\
Nutrition_values["protein(%)"]/N_to_P_conversion["conversion factor"], 0)
Protein_percent = Nutrition_values.groupby(["group"]).apply(lambda x: x["protein(%)"].mean())
Nutrient_percent = Protein_percent.reset_index(level = ["group"])
Nutrient_percent = Nutrient_percent.fillna(value = 0)
Nutrient_percent = Nutrient_percent.rename(columns = {0: "%protein"})
Calorie_percent = Nutrition_values.groupby(["group"]).apply(lambda x: x["calories (100g)"].mean()/100)
Calorie_percent = Calorie_percent.reset_index(level = ["group"])
Calorie_percent = Calorie_percent.fillna(value = 0)
Calorie_percent = Calorie_percent.rename(columns = {0: "calories per g"})
Fat_percent = Nutrition_values.groupby(["group"]).apply(lambda x: x["fat(%)"].mean())
Fat_percent = Fat_percent.reset_index(level = ["group"])
Fat_percent = Fat_percent.fillna(value = 0)
Fat_percent = Fat_percent.rename(columns = {0: "%fat"})
Nutrient_percent["calories per g"] = Calorie_percent["calories per g"]
Nutrient_percent["%fat"] = Fat_percent['%fat']
POM_data = pd.merge(POM_data, Nutrient_percent, on = ["group"])
POM_data["% Protein"].fillna(POM_data["%protein"], inplace = True)
POM_data = POM_data.drop(["%protein"], axis = 1)
POM_data = POM_data.dropna(subset = ['POM'])
"""*** Calculate protein and calorie demand of each nation ***"""
POM_data["Protein Needed (g)"] = POM_data["avg weight"] * 1.6
POM_data["Calories Needed (cal)"] = POM_data["avg weight"] * 15 + 587.5
"""*** Determine the ratio of what people eat based on EAT Lancet Categories *** """
POM_data["EAT_group"] = POM_data.apply(lambda x: regions.EAT_Lancet_Group(x["group"]), axis =1)
POM_data["POM CAL (no waste)"] = POM_data['POM (no waste)']*POM_data['calories per g']
POM_data["POM fat (no waste)"] = POM_data['POM (no waste)']*POM_data['%fat']/100
#POM_data["POM EAT Group %"] = POM_data["EAT_group"]
POM_eat_group = POM_data.groupby(["Area", "EAT_group"]).apply(lambda x: (x["POM CAL (no waste)"])/(x["POM CAL (no waste)"]).sum()) #fix the last definition of POM group %
POM_eat_group = POM_eat_group.to_frame() #set_index("Index", inplace = True)
POM_eat_group = POM_eat_group.reset_index(level = ["Area", "EAT_group"])
POM_eat_group = POM_eat_group.rename(columns={0 : "POM CAL (no waste)"})
#POM_eat_group.set_index("Index", inplace = True)
POM_data = POM_data.merge(POM_eat_group["POM CAL (no waste)"], left_index=True, right_index = True)
POM_data = POM_data.rename(columns={"POM CAL (no waste)_x" : "POM CAL (no waste)",\
"POM CAL (no waste)_y" : "POM EAT Group cal %"})
POM_eat_group = POM_data.groupby(["Area", "EAT_group"]).apply(lambda x: x["POM (no waste)"]/x["POM (no waste)"].sum()) #fix the last definition of POM group %
POM_eat_group = POM_eat_group.to_frame() #set_index("Index", inplace = True)
POM_eat_group = POM_eat_group.reset_index(level = ["Area", "EAT_group"])
POM_data = POM_data.merge(POM_eat_group["POM (no waste)"], left_index=True, right_index = True)
POM_data = POM_data.rename(columns={"POM (no waste)_x" : "POM (no waste)",\
"POM (no waste)_y" : "POM EAT Group g %"})
POM_data["POM EAT Group cal %"] = POM_data["POM EAT Group cal %"] * 100
POM_data["POM EAT Group g %"] = POM_data["POM EAT Group g %"] * 100
return POM_data
def eat_diet_build(POM_data, wastefractions, diet_main):
Lancet_Targets = {"group": ["whole grains", "tubers or starchy vegetables", "vegetables",\
"all fruit", "dairy foods", "beef, lamb and pork", "chicken and other poultry",\
"eggs", "fish", "legumes", "nuts", "added fats", "added sugars"],
"caloric intake": diet_main}
    Lancet_Targets = pd.DataFrame(Lancet_Targets)
"""Export CNVkit objects and files to other formats."""
import logging
import time
from collections import OrderedDict as OD
import numpy as np
import pandas as pd
from skgenome import tabio
from . import call
from .cmdutil import read_cna
from ._version import __version__
def merge_samples(filenames):
"""Merge probe values from multiple samples into a 2D table (of sorts).
Input:
dict of {sample ID: (probes, values)}
Output:
list-of-tuples: (probe, log2 coverages...)
"""
def label_with_gene(cnarr):
row2label = lambda row: "{}:{}-{}:{}".format(
row.chromosome, row.start, row.end, row.gene)
return cnarr.data.apply(row2label, axis=1)
if not filenames:
return []
first_cnarr = read_cna(filenames[0])
out_table = first_cnarr.data.loc[:, ["chromosome", "start", "end", "gene"]]
out_table["label"] = label_with_gene(first_cnarr)
out_table[first_cnarr.sample_id] = first_cnarr["log2"]
for fname in filenames[1:]:
cnarr = read_cna(fname)
if not (len(cnarr) == len(out_table)
and (label_with_gene(cnarr) == out_table["label"]).all()):
raise ValueError("Mismatched row coordinates in %s" % fname)
# Copy the next column by sample ID
if cnarr.sample_id in out_table.columns:
raise ValueError("Duplicate sample ID: %s" % cnarr.sample_id)
out_table[cnarr.sample_id] = cnarr["log2"]
del cnarr
return out_table
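# Example usage (hypothetical file names; assumes all .cnr files share identical bin coordinates):
#   merged = merge_samples(["sampleA.cnr", "sampleB.cnr"])
#   # -> columns: chromosome, start, end, gene, label, sampleA, sampleB (log2 value per sample)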
# Supported formats:
def fmt_cdt(sample_ids, table):
"""Format as CDT.
See:
- http://jtreeview.sourceforge.net/docs/JTVUserManual/ch02s11.html
- http://www.eisenlab.org/FuzzyK/cdt.html
"""
outheader = ['GID', 'CLID', 'NAME', 'GWEIGHT'] + sample_ids
header2 = ['AID', '', '', '']
header2.extend(['ARRY' + str(i).zfill(3) + 'X'
for i in range(len(sample_ids))])
header3 = ['EWEIGHT', '', '', ''] + ['1'] * len(sample_ids)
outrows = [header2, header3]
outtable = pd.concat([
pd.DataFrame.from_dict(OD([
("GID", pd.Series(table.index).apply(lambda x: "GENE%dX" % x)),
("CLID", pd.Series(table.index).apply(lambda x: "IMAGE:%d" % x)),
("NAME", table["label"]),
("GWEIGHT", 1),
])),
table.drop(["chromosome", "start", "end", "gene", "label"],
axis=1)],
axis=1)
outrows.extend(outtable.itertuples(index=False))
return outheader, outrows
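# Sketch of the CDT layout produced above: row 1 is GID/CLID/NAME/GWEIGHT plus the sample IDs,
# row 2 maps samples to AID labels (ARRY000X, ...), row 3 carries EWEIGHT=1 per sample, and each
# subsequent row is one bin (GENE<n>X) with its log2 value for every sample.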
# TODO
def fmt_gct(sample_ids, table):
return NotImplemented
def fmt_jtv(sample_ids, table):
"""Format for Java TreeView."""
outheader = ["CloneID", "Name"] + sample_ids
outtable = pd.concat([
pd.DataFrame({
"CloneID": "IMAGE:",
"Name": table["label"],
}),
table.drop(["chromosome", "start", "end", "gene", "label"],
axis=1)],
axis=1)
outrows = outtable.itertuples(index=False)
return outheader, outrows
# Special cases
def export_nexus_basic(cnarr):
"""Biodiscovery Nexus Copy Number "basic" format.
Only represents one sample per file.
"""
out_table = cnarr.data.loc[:, ['chromosome', 'start', 'end', 'gene', 'log2']]
out_table['probe'] = cnarr.labels()
return out_table
def export_nexus_ogt(cnarr, varr, min_weight=0.0):
"""Biodiscovery Nexus Copy Number "Custom-OGT" format.
    To create the b-allele frequencies column, alternate allele frequencies from
the VCF are aligned to the .cnr file bins. Bins that contain no variants
are left blank; if a bin contains multiple variants, then the frequencies
are all "mirrored" to be above or below .5 (majority rules), then the median
of those values is taken.
"""
if min_weight and "weight" in cnarr:
mask_low_weight = (cnarr["weight"] < min_weight)
logging.info("Dropping %d bins with weight below %f",
mask_low_weight.sum(), min_weight)
cnarr.data = cnarr.data[~mask_low_weight]
bafs = varr.baf_by_ranges(cnarr, above_half=True)
logging.info("Placed %d variants into %d bins",
sum(~np.isnan(bafs)), len(cnarr))
out_table = cnarr.data.loc[:, ['chromosome', 'start', 'end', 'log2']]
out_table = out_table.rename(columns={
"chromosome": "Chromosome",
"start": "Position",
"end": "Position",
"log2": "Log R Ratio",
})
out_table["B-Allele Frequency"] = bafs
return out_table
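# Example usage (hypothetical objects): cnarr read from a .cnr file, varr from a matched VCF,
# dropping low-confidence bins before assigning b-allele frequencies:
#   ogt_table = export_nexus_ogt(cnarr, varr, min_weight=0.2)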
def export_seg(sample_fnames, chrom_ids=False):
"""SEG format for copy number segments.
Segment breakpoints are not the same across samples, so samples are listed
in serial with the sample ID as the left column.
"""
dframes, sample_ids = zip(*(_load_seg_dframe_id(fname)
for fname in sample_fnames))
out_table = tabio.seg.write_seg(dframes, sample_ids, chrom_ids)
return out_table
def _load_seg_dframe_id(fname):
segarr = read_cna(fname)
assert segarr is not None
assert segarr.data is not None
assert segarr.sample_id is not None
return segarr.data, segarr.sample_id
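# Example usage (hypothetical filenames):
#   seg_table = export_seg(["tumorA.cns", "tumorB.cns"])
#   seg_table.to_csv("all_samples.seg", sep="\t", index=False)  # assuming a pandas DataFrame is returned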
# _____________________________________________________________________________
# BED
def export_bed(segments, ploidy, is_reference_male, is_sample_female,
label, show):
"""Convert a copy number array to a BED-like DataFrame.
For each region in each sample (possibly filtered according to `show`),
the columns are:
- reference sequence name
- start (0-indexed)
- end
- sample name or given label
- integer copy number
By default (show="ploidy"), skip regions where copy number is the default
ploidy, i.e. equal to 2 or the value set by --ploidy.
If show="variant", skip regions where copy number is neutral, i.e. equal to
the reference ploidy on autosomes, or half that on sex chromosomes.
"""
out = segments.data.loc[:, ["chromosome", "start", "end"]]
out["label"] = label if label else segments["gene"]
out["ncopies"] = (segments["cn"] if "cn" in segments
else call.absolute_pure(segments, ploidy, is_reference_male)
.round().astype('int'))
if show == "ploidy":
# Skip regions of default ploidy
out = out[out["ncopies"] != ploidy]
elif show == "variant":
# Skip regions of non-neutral copy number
exp_copies = call.absolute_expect(segments, ploidy, is_sample_female)
out = out[out["ncopies"] != exp_copies]
return out
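# Example usage (hypothetical values): keep only non-neutral segments for a female sample
#   bed_df = export_bed(segments, ploidy=2, is_reference_male=True,
#                       is_sample_female=True, label="sample1", show="variant")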
# _____________________________________________________________________________
# VCF
VCF_HEADER = """\
##fileformat=VCFv4.2
##fileDate={date}
##source=CNVkit v{version}
##INFO=<ID=CIEND,Number=2,Type=Integer,Description="Confidence interval around END for imprecise variants">
##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">
##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">
##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">
##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">
##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">
##INFO=<ID=FOLD_CHANGE,Number=1,Type=Float,Description="Fold change">
##INFO=<ID=FOLD_CHANGE_LOG,Number=1,Type=Float,Description="Log fold change">
##INFO=<ID=PROBES,Number=1,Type=Integer,Description="Number of probes in CNV">
##ALT=<ID=DEL,Description="Deletion">
##ALT=<ID=DUP,Description="Duplication">
##ALT=<ID=CNV,Description="Copy number variable region">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype quality">
##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number genotype for imprecise events">
##FORMAT=<ID=CNQ,Number=1,Type=Float,Description="Copy number genotype quality for imprecise events">
""".format(date=time.strftime("%Y%m%d"), version=__version__)
# #CHROM POS ID REF ALT QUAL FILTER INFO FORMAT NA00001
# 1 2827693 . CCGTGGATGCGGGGACCCGCATCCCCTCTCCCTTCACAGCTGAGTGACCCACATCCCCTCTCCCCTCGCA C . PASS SVTYPE=DEL;END=2827680;BKPTID=Pindel_LCS_D1099159;HOMLEN=1;HOMSEQ=C;SVLEN=-66 GT:GQ 1/1:13.9
# 2 321682 . T <DEL> 6 PASS IMPRECISE;SVTYPE=DEL;END=321887;SVLEN=-105;CIPOS=-56,20;CIEND=-10,62 GT:GQ 0/1:12
# 3 12665100 . A <DUP> 14 PASS IMPRECISE;SVTYPE=DUP;END=12686200;SVLEN=21100;CIPOS=-500,500;CIEND=-500,500 GT:GQ:CN:CNQ ./.:0:3:16.2
# 4 18665128 . T <DUP:TANDEM> 11 PASS IMPRECISE;SVTYPE=DUP;END=18665204;SVLEN=76;CIPOS=-10,10;CIEND=-10,10 GT:GQ:CN:CNQ ./.:0:5:8.3
def export_vcf(segments, ploidy, is_reference_male, is_sample_female,
sample_id=None, cnarr=None):
"""Convert segments to Variant Call Format.
For now, only 1 sample per VCF. (Overlapping CNVs seem tricky.)
Spec: https://samtools.github.io/hts-specs/VCFv4.2.pdf
"""
vcf_columns = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER",
"INFO", "FORMAT", sample_id or segments.sample_id]
if cnarr:
segments = assign_ci_start_end(segments, cnarr)
vcf_rows = segments2vcf(segments, ploidy, is_reference_male,
is_sample_female)
table = pd.DataFrame.from_records(vcf_rows, columns=vcf_columns)
vcf_body = table.to_csv(sep='\t', header=True, index=False,
float_format="%.3g")
return VCF_HEADER, vcf_body
def assign_ci_start_end(segarr, cnarr):
"""Assign ci_start and ci_end fields to segments.
Values for each segment indicate the CI boundary points within that segment,
i.e. the right CI boundary for the left-side breakpoint (segment start), and
left CI boundary for the right-side breakpoint (segment end).
This is a little unintuitive because the CI refers to the breakpoint, not
the segment, but we're storing the info in an array of segments.
Calculation: Just use the boundaries of the bins left- and right-adjacent to
each segment breakpoint.
"""
lefts_rights = ((bins.end.iat[0], bins.start.iat[-1])
for _seg, bins in cnarr.by_ranges(segarr, mode="outer"))
ci_lefts, ci_rights = zip(*lefts_rights)
return segarr.as_dataframe(
segarr.data.assign(ci_left=ci_lefts, ci_right=ci_rights))
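# Sketch of the idea: for each segment, ci_left is the end of the first bin overlapping the segment
# (the right CI bound of the start breakpoint) and ci_right is the start of the last overlapping bin
# (the left CI bound of the end breakpoint); segments2vcf later turns these margins into the
# CIPOS/CIEND INFO fields.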
def segments2vcf(segments, ploidy, is_reference_male, is_sample_female):
"""Convert copy number segments to VCF records."""
out_dframe = segments.data.loc[:, ["chromosome", "end", "log2", "probes", "aberrant_cell_frac"]]
out_dframe["start"] = segments.start.replace(0, 1)
if "cn" in segments:
out_dframe["ncopies"] = segments["cn"]
abs_expect = call.absolute_expect(segments, ploidy, is_sample_female)
else:
abs_dframe = call.absolute_dataframe(segments, ploidy, 1.0,
is_reference_male,
is_sample_female)
out_dframe["ncopies"] = abs_dframe["absolute"].round().astype('int')
abs_expect = abs_dframe["expect"]
idx_losses = (out_dframe["ncopies"] < abs_expect)
svlen = segments.end - segments.start
svlen[idx_losses] *= -1
out_dframe["svlen"] = svlen
out_dframe["svtype"] = "DUP"
out_dframe.loc[idx_losses, "svtype"] = "DEL"
out_dframe["format"] = "GT:GQ:CN:CNQ"
out_dframe.loc[idx_losses, "format"] = "GT:GQ" # :CN:CNQ ?
if "ci_left" in segments and "ci_right" in segments:
has_ci = True
# Calculate fuzzy left&right coords for CIPOS and CIEND
left_margin = segments["ci_left"].values - segments.start.values
right_margin = segments.end.values - segments["ci_right"].values
out_dframe["ci_pos_left"] = np.r_[0, -right_margin[:-1]]
out_dframe["ci_pos_right"] = left_margin
out_dframe["ci_end_left"] = right_margin
out_dframe["ci_end_right"] = np.r_[left_margin[1:], 0]
else:
has_ci = False
# Reformat this data to create INFO and genotype
# TODO be more clever about this
for out_row, abs_exp in zip(out_dframe.itertuples(index=False), abs_expect):
if (out_row.ncopies == abs_exp or
# Survive files from buggy v0.7.1 (#53)
not str(out_row.probes).isdigit()):
# Skip regions of neutral copy number
continue # or "CNV" for subclonal?
if out_row.ncopies > abs_exp:
genotype = "0/1:0:%d:%d" % (out_row.ncopies, out_row.probes)
elif out_row.ncopies < abs_exp:
# TODO XXX handle non-diploid ploidies, haploid chroms
if out_row.ncopies == 0:
# Complete deletion, 0 copies
gt = "1/1"
else:
# Single copy deletion
gt = "0/1"
genotype = "%s:%d" % (gt, out_row.probes)
fields = ["IMPRECISE",
"SVTYPE=%s" % out_row.svtype,
"END=%d" % out_row.end,
"SVLEN=%d" % out_row.svlen,
"FOLD_CHANGE=%f" % 2.0 ** out_row.log2,
"FOLD_CHANGE_LOG=%f" % out_row.log2,
"PROBES=%d" % out_row.probes,
"FRAC=%f" % out_row.aberrant_cell_frac
]
if has_ci:
fields.extend([
"CIPOS=(%d,%d)" % (out_row.ci_pos_left, out_row.ci_pos_right),
"CIEND=(%d,%d)" % (out_row.ci_end_left, out_row.ci_end_right),
])
info = ";".join(fields)
yield (out_row.chromosome, out_row.start, '.', 'N',
"<%s>" % out_row.svtype, '.', '.',
info, out_row.format, genotype)
# _____________________________________________________________________________
# GISTIC
def export_gistic_markers(cnr_fnames):
"""Generate a GISTIC 2.0 "markers" file from a set of .cnr files.
GISTIC documentation:
ftp://ftp.broadinstitute.org/pub/GISTIC2.0/GISTICDocumentation_standalone.htm
http://genepattern.broadinstitute.org/ftp/distribution/genepattern/modules_public_server_doc/GISTIC2.pdf
http://gdac.broadinstitute.org/runs/analyses__2013_05_23/reports/cancer/KICH-TP/CopyNumber_Gistic2/nozzle.html
The markers file identifies the marker names and positions of the markers in
the original dataset (before segmentation). It is a three column,
tab-delimited file with an optional header. The column headers are:
(1) Marker Name
(2) Chromosome
(3) Marker Position (in bases)
GISTIC also needs an accompanying SEG file generated from corresponding .cns
files.
"""
colnames = ["ID", "CHROM", "POS"]
out_chunks = []
# TODO since markers will mostly be the same,
# detect duplicates & exclude them
# seen_marker_ids = None
for fname in cnr_fnames:
cna = read_cna(fname).autosomes()
marker_ids = cna.labels()
tbl = pd.concat([
pd.DataFrame({
"ID": marker_ids,
"CHROM": cna.chromosome,
"POS": cna.start + 1,
}, columns=colnames),
pd.DataFrame({
"ID": marker_ids,
"CHROM": cna.chromosome,
"POS": cna.end,
}, columns=colnames),
], ignore_index=True)
out_chunks.append(tbl)
    return pd.concat(out_chunks)
import pandas as pd
from typing import List
import sys
metabolites_path = sys.argv[1]
proteins_path = sys.argv[2]
pathways_path = sys.argv[3]
def make_node_set(df):
return df.reindex(columns=['id', 'name', 'category', 'description', 'synonyms', 'xrefs'])
def make_edge_set(df):
return df.reindex(columns=['subject_id', 'predicate', 'object_id'])
def build_pathways(path) -> pd.DataFrame:
"""
Builds the pathway node set
"""
df = pd.read_csv(path, dtype=str)
df = df.rename(columns={
'SMPDB ID' : 'id',
'Name' : 'name',
'Description' : 'description'
})
df['category'] = 'pathway'
df['id'] = df.apply(lambda row: "SMP:{}".format(row['id']), axis=1)
df = make_node_set(df)
return df
def build_metabolites(path) -> (pd.DataFrame, pd.DataFrame):
"""
Builds the metabolite node set and edge set for the chemical_to_pathway_association
predicate.
"""
def build(row):
options = [
('ChEBI ID', 'CHEBI'),
('KEGG ID', 'KEGG'),
('HMDB ID', 'HMDB'),
('DrugBank ID', 'DRUGBANK'),
('Metabolite ID', 'PW'),
]
for column, prefix in options:
if isinstance(row[column], str):
return '{}:{}'.format(prefix, row[column])
print(row)
raise Exception('Could not find a metabolite ID')
df = pd.read_csv(path, dtype=str)
df['id'] = df.apply(build, axis=1)
df = df.drop_duplicates('id')
df['SMPDB ID'] = df.apply(lambda row: "SMP:{}".format(row['SMPDB ID']), axis=1)
nodes = df
edges = df
nodes['category'] = 'metabolite'
nodes = nodes.rename(columns={
'Metabolite Name' : 'name',
'IUPAC' : 'synonyms',
})
edges['predicate'] = 'chemical_to_pathway_association'
edges = df.rename(columns={
'id' : 'subject_id',
'SMPDB ID' : 'object_id',
})
nodes = make_node_set(nodes)
edges = make_edge_set(edges)
return nodes, edges
def build_proteins(path) -> (pd.DataFrame, pd.DataFrame):
"""
Builds the protein node set and edge set for the chemical_to_pathway_association
predicate.
"""
def build(row):
options = [
('Uniprot ID', 'UNIPROT'),
('DrugBank ID', 'DRUGBANK'),
('HMDBP ID', 'HMDB'),
('GenBank ID', 'GENBANK'),
]
# xrefs = []
for column, prefix in options:
if isinstance(row[column], str):
return '{}:{}'.format(prefix, row[column])
# xrefs.append(f'{prefix}:{row[column]}')
# if xrefs == []:
# raise Exception('Cannot find ID for above row')
# else:
# row['id'] = xrefs[0]
# row['xrefs'] = ';'.join(xrefs[1:])
# return row
df = pd.read_csv(path, dtype=str)
df['id'] = df.apply(build, axis=1)
df = df.drop_duplicates('id')
df['SMPDB ID'] = df.apply(lambda row: "SMP:{}".format(row['SMPDB ID']), axis=1)
nodes = df
edges = df
nodes['category'] = 'protein'
nodes = nodes.rename(columns={
'Protein Name' : 'name',
'Gene Name' : 'synonyms',
})
edges['predicate'] = 'chemical_to_pathway_association'
edges = df.rename(columns={
'id' : 'subject_id',
'SMPDB ID' : 'object_id',
})
nodes = make_node_set(nodes)
edges = make_edge_set(edges)
return nodes, edges
def infer_edges(chemical_to_pathway_edges) -> List[pd.DataFrame]:
e = chemical_to_pathway_edges.drop(columns='predicate')
A = e.rename(columns={'subject_id' : 'A'})
B = e.rename(columns={'subject_id' : 'B'})
df = A.merge(B, on='object_id', how='inner')
df = df[df['A'] < df['B']]
df = df.drop(columns='object_id')
df = df.rename(columns={'A' : 'subject_id', 'B' : 'object_id'})
df['predicate'] = 'shares_pathway_with'
df = make_edge_set(df)
return pd.concat([chemical_to_pathway_edges, df])
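# Illustration (hypothetical IDs): if CHEBI:1 and UNIPROT:P1 are both linked to pathway SMP:0000001,
# the self-merge above yields one shares_pathway_with edge between them; the A < B filter keeps a
# single direction per pair and removes self-pairs before appending to the original edge set.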
if __name__ == '__main__':
print('building proteins')
p_node, p_edge = build_proteins(proteins_path)
print('building metabolites')
m_node, m_edge = build_metabolites(metabolites_path)
print('building pathways')
pathway_nodes = build_pathways(pathways_path)
    nodes = pd.concat([p_node, m_node, pathway_nodes])
import config as cfg
from pfeparser.pfe import PillarFeatureNet, PointPillarsScatter
from pfeparser.pre_voxel import preprocess
import torch
from collections import OrderedDict
import pandas as pd
import heapq
import numpy as np
import logging.config
logging.config.fileConfig(cfg.log_cfg_path)
import configparser
config = configparser.ConfigParser(delimiters=":")
from onnxparser.onnx_parser import OnnxParserV2
from quantize_base.quantize_base_kl_v2 import ThresholdLayerOutputs
class PFEQuantizer(object):
__max_input_ = OrderedDict()
__min_input_ = OrderedDict()
__max_output_ = OrderedDict()
__min_output_ = OrderedDict()
quanti_layer_name = ['164']
def __init__(self,
voxel_size,
point_clound_range,
scatter_output_shape,
max_points_per_voxel,
input_quanti_bits,
output_quanti_method,
quanti_bits,
ini_file):
super().__init__()
self.name_ = 'PFEQuantizer'
# self.pfe_net_ = PillarFeatureNet(voxel_size=voxel_size, pc_range=point_clound_range)
self.pfe_net_ = PillarFeatureNet(voxel_size=voxel_size, pc_range=point_clound_range)
self.pfe_model()
self.scatter_ = PointPillarsScatter(output_shape=scatter_output_shape)
self.voxel_size_ = voxel_size
self.point_clound_range_ = point_clound_range
self.max_points_per_voxel_ = max_points_per_voxel
self.input_quanti_bits_ = input_quanti_bits
self.output_quanti_method_ = output_quanti_method
self.quanti_bits_ = quanti_bits
self.ini_file_ = ini_file
def __del__(self):
del self.name_
del self.pfe_net_
del self.scatter_
del self.voxel_size_
del self.point_clound_range_
del self.max_points_per_voxel_
del self.input_quanti_bits_
del self.output_quanti_method_
del self.quanti_bits_
del self.ini_file_
def pfe_model(self):
# pfe_net_dict = self.pfe_net_.state_dict()
# model = torch.load(cfg.PFE_MODEL_PATH, map_location="cuda" if torch.cuda.is_available() else "cpu")
#
# pfe_key = 'voxel_feature_extractor'
# model_pfe_param = {}
# for k, v in model.items():
# pfe_layer_key = k.lstrip(pfe_key).lstrip('.')
# if pfe_layer_key in pfe_net_dict:
# model_pfe_param[pfe_layer_key] = v
# else:
# pass
#
# self.__weights_ = model_pfe_param['pfn_layers.0.linear.weight'].detach().numpy()
#
# pfe_net_dict.update(model_pfe_param)
# self.pfe_net_.load_state_dict(pfe_net_dict)
# self.pfe_net_.eval()
pfe_parser = OnnxParserV2(cfg.PFE_MODEL_PATH)
pfe_net_dict_ = self.pfe_net_.state_dict()
# model_ = torch.load(cfg.PFE_MODEL_PATH, map_location="cuda" if torch.cuda.is_available() else "cpu")
params = pfe_parser.get_param()
self.__weights_ = params['pfn_layers.0.linear.weight'].detach().numpy()
pfe_net_dict_.update(params)
self.pfe_net_.load_state_dict(pfe_net_dict_)
self.pfe_net_.eval()
print('fpe model success')
def forword(self, x):
print('*********************file_name: {}**********************'.format(x))
example = preprocess(x, self.voxel_size_, self.point_clound_range_, self.max_points_per_voxel_)
features = torch.from_numpy(example['voxels'])
num_voxels = torch.from_numpy(example['num_points'])
coors = torch.from_numpy(example['coordinates'])
pfe_output = self.pfe_net_(features, num_voxels, coors)
# pfe_output = self.pfe_net_(features, num_voxels, coors)
scatter_output = self.scatter_(pfe_output, coors, 1)
self.get_layer_min_max(OrderedDict([(self.quanti_layer_name[0], self.quanti_layer_name[0]), ]),
[scatter_output.detach().numpy()])
self.get_input_min_max(self.quanti_layer_name, features.detach().numpy())
del example, features, num_voxels, coors
return scatter_output
def save_param(self):
parm_list = []
val_list = []
        # Weight quantization
weight_scale, weight_zero_point, weight_max, weight_min = \
self.quanti_weight_channel_symmetry_quantitative(self.__weights_, self.quanti_bits_[0])
# 输入量化
input_quanti_bits_ = self.input_quanti_bits_
min_val_input, max_val_input, input_scale_float, input_zero_point_float = \
self.get_quanti_param(self.__min_input_[self.quanti_layer_name[0]],
self.__max_input_[self.quanti_layer_name[0]],
input_quanti_bits_)
parm_list.extend(cfg.key_list_input_quan_param)
val_list.extend([min_val_input, max_val_input, input_scale_float, input_zero_point_float])
        # Output quantization
min_val, max_val, outputs_scale_float, outputs_zero_point_float = \
self.get_quanti_param(self.__min_output_[self.quanti_layer_name[0]],
self.__max_output_[self.quanti_layer_name[0]],
self.quanti_bits_[1])
parm_list.extend(cfg.key_list_weight_quan_param + cfg.key_list_output_quan_param)
val_list.extend([weight_min,
weight_max,
str(weight_scale).strip('[]'),
str(weight_zero_point).strip('[]'),
self.quanti_bits_[0],
min_val,
max_val,
outputs_scale_float,
outputs_zero_point_float,
input_quanti_bits_,
self.quanti_bits_[1]])
self.write_ini_file(self.ini_file_, self.quanti_layer_name[0], parm_list, val_list)
pass
def write_ini_file(self, ini_file, layer_name, parm_list, val_list):
"""
Write the quantization parameters to a file with the suffix ini
:param layer_name: layer name to be write
:param parm_list: a list of parameters name
:param val_list: a list of values corresponding to the parameters name order in parm_list
:return:
"""
dict_parm_val = OrderedDict()
for k, v in zip(parm_list, val_list):
print(k)
dict_parm_val[k] = v
config[layer_name] = dict_parm_val
with open(ini_file, 'w') as configfile:
config.write(configfile, space_around_delimiters=False)
del dict_parm_val
def get_quanti_param(self, min_list_all, max_list_all, quanti_bits, n=100):
min_list = pd.Series(min_list_all)
data_count = min_list.value_counts(bins=2, normalize=True, sort=False)
print(data_count)
for k, v in data_count.items():
if v > 0.7:
                num_thresh = str(k).split(',')[-1].split(']')[0]  # k.right would work as well: take the right endpoint of the interval
min_list = np.array(min_list[min_list.astype(np.float32) <= float(num_thresh)])
if len(min_list) > n:
min_list = np.array(min_list)
min_list_index = heapq.nsmallest(n, range(len(min_list)), min_list.take)
min_list = min_list[min_list_index]
elif len(min_list) == 0:
min_list = min_list_all
min_input_all = np.mean(min_list)
        max_list = pd.Series(max_list_all)
from selenium import webdriver
import concurrent.futures
import pandas as pd
from bs4 import BeautifulSoup
from datetime import datetime
import time
import codecs
import os
import glob
import gc
from pytz import timezone
tz = timezone('US/Eastern')
pd.set_option('display.max_columns', 500)
import random
import sys
import os
import dill
import argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
import itertools
import torch
#from .kalman_filter import NonlinearKinematicBicycle
# sys.path.append("/home/adeshkin/Desktop/shifts/sdc")
from ysdc_dataset_api.utils import get_file_paths, scenes_generator, get_latest_track_state_by_id, get_to_track_frame_transform
from ysdc_dataset_api.features import FeatureRenderer
sys.path.append("../../../trajectron")
from environment import Environment, Scene, Node
from environment import derivative_of as derivative_of
FREQUENCY = 5
dt = 1. / FREQUENCY
data_columns_vehicle = pd.MultiIndex.from_product([['position', 'velocity', 'acceleration', 'heading'], ['x', 'y']])
data_columns_vehicle = data_columns_vehicle.append(pd.MultiIndex.from_tuples([('heading', '°'), ('heading', 'd°')]))
data_columns_vehicle = data_columns_vehicle.append(pd.MultiIndex.from_product([['velocity', 'acceleration'], ['norm']]))
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/18 18:18
Desc: Eastmoney Fund (天天基金网) - fund data - dividends and splits
http://fund.eastmoney.com/data/fundfenhong.html
"""
import pandas as pd
import requests
from tqdm import tqdm
def fund_fh_em() -> pd.DataFrame:
"""
    Eastmoney Fund - fund data - dividends and splits - fund dividends
    http://fund.eastmoney.com/data/fundfenhong.html#DJR,desc,1,,,
    :return: fund dividend records
    :rtype: pandas.DataFrame
"""
url = "http://fund.eastmoney.com/Data/funddataIndex_Interface.aspx"
params = {
"dt": "8",
"page": "1",
"rank": "DJR",
"sort": "desc",
"gs": "",
"ftype": "",
"year": "",
}
r = requests.get(url, params=params)
data_text = r.text
total_page = eval(data_text[data_text.find("=") + 1: data_text.find(";")])[0]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"page": page})
r = requests.get(url, params=params)
data_text = r.text
temp_list = eval(
data_text[data_text.find("[["): data_text.find(";var jjfh_jjgs")]
)
temp_df = pd.DataFrame(temp_list)
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"基金代码",
"基金简称",
"权益登记日",
"除息日期",
"分红",
"分红发放日",
"-",
]
big_df = big_df[["序号", "基金代码", "基金简称", "权益登记日", "除息日期", "分红", "分红发放日"]]
big_df['权益登记日'] = pd.to_datetime(big_df['权益登记日']).dt.date
big_df['除息日期'] = pd.to_datetime(big_df['除息日期']).dt.date
big_df['分红发放日'] = pd.to_datetime(big_df['分红发放日']).dt.date
big_df['分红'] = pd.to_numeric(big_df['分红'])
return big_df
def fund_cf_em() -> pd.DataFrame:
"""
    Eastmoney Fund - fund data - dividends and splits - fund splits
    http://fund.eastmoney.com/data/fundchaifen.html#FSRQ,desc,1,,,
    :return: fund split records
:rtype: pandas.DataFrame
"""
url = "http://fund.eastmoney.com/Data/funddataIndex_Interface.aspx"
params = {
"dt": "9",
"page": "1",
"rank": "FSRQ",
"sort": "desc",
"gs": "",
"ftype": "",
"year": "",
}
r = requests.get(url, params=params)
data_text = r.text
total_page = eval(data_text[data_text.find("=") + 1: data_text.find(";")])[0]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"page": page})
r = requests.get(url, params=params)
data_text = r.text
temp_list = eval(
data_text[data_text.find("[["): data_text.find(";var jjcf_jjgs")]
)
temp_df = pd.DataFrame(temp_list)
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"基金代码",
"基金简称",
"拆分折算日",
"拆分类型",
"拆分折算",
"-",
]
big_df = big_df[["序号", "基金代码", "基金简称", "拆分折算日", "拆分类型", "拆分折算"]]
big_df['拆分折算日'] = pd.to_datetime(big_df['拆分折算日']).dt.date
big_df['拆分折算'] = pd.to_numeric(big_df['拆分折算'], errors="coerce")
return big_df
def fund_fh_rank_em() -> pd.DataFrame:
"""
    Eastmoney Fund - fund data - dividends and splits - fund dividend ranking
    http://fund.eastmoney.com/data/fundleijifenhong.html#FHFCZ,desc,1,,
    :return: fund dividend ranking
:rtype: pandas.DataFrame
"""
url = "http://fund.eastmoney.com/Data/funddataIndex_Interface.aspx"
params = {
"dt": "10",
"page": "1",
"rank": "FHFCZ",
"sort": "desc",
"gs": "",
"ftype": "",
"year": "",
}
r = requests.get(url, params=params)
data_text = r.text
total_page = eval(data_text[data_text.find("=") + 1: data_text.find(";")])[0]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"page": page})
r = requests.get(url, params=params)
data_text = r.text
temp_list = eval(
data_text[data_text.find("[["): data_text.find(";var fhph_jjgs")]
)
temp_df = pd.DataFrame(temp_list)
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"基金代码",
"基金简称",
"累计分红",
"累计次数",
"成立日期",
"-",
]
big_df = big_df[["序号", "基金代码", "基金简称", "累计分红", "累计次数", "成立日期"]]
    big_df['成立日期'] = pd.to_datetime(big_df['成立日期']).dt.date
    return big_df
import os
import cv2
import numpy as np
import pandas as pd
upperBound_s1 = np.array([200, 150, 255])
lowerBound_s1 = np.array([100, 0, 85])
# [130, 0, 85]
upperBound_fish = np.array([50, 255, 197])
lowerBound_fish = np.array([20, 215, 147])
upperBound_chest = np.array([58, 86, 215])
lowerBound_chest = np.array([8, 36, 165])
# Makes the x coordinate of the center of the fish and of the rectangle to be in the right place
x_center_calibration_value = 10
def find_green_rectangle(green_bar_win) -> dict:
"""
Image parameter needs to be in BGR
"""
# Chose this color scheme because it seemed to be one of the few who worked. BGR2Lab also seems to work.
img_YCrCb = cv2.cvtColor(green_bar_win, cv2.COLOR_RGB2YCrCb)
# Lower and upper bounds were manually found to always detect the green rectangle
img_green = cv2.inRange(img_YCrCb, lowerBound_s1, upperBound_s1)
# TODO: maybe remove this
# Eroding to reduce the noise formed by the green algaes at the bottom of the mini game
kernel = np.ones((2, 2), np.uint8)
img_green = cv2.erode(img_green, kernel, iterations=2)
# Finding contours of the green rectangle (always finds it + some noise):
_, conts, hierarchy = cv2.findContours(img_green, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt_list = []
for cnt in conts:
area = cv2.contourArea(cnt)
# filter noise of those damn algaes
if area > 100: # 200
# x1, y1, w, h = cv2.boundingRect(cnt)
# x2 = x1 + w # (x1, y1) = top-left vertex
# y2 = y1 + h # (x2, y2) = bottom-right vertex
# rect = cv2.rectangle(green_bar_win, (x1, y1), (x2, y2), (255,255,255), 2) # really useful to uncomment this to debug
cnt_list.append(cnt)
# Finding bottom-most/top-most points, then calculate center point:
# if it find only 1 rectangle. This means that the fish is not at the bar.
if len(cnt_list) == 1:
cnt1 = cnt_list[0]
topmost = tuple(cnt1[cnt1[:, :, 1].argmin()][0])
bottommost = tuple(cnt1[cnt1[:, :, 1].argmax()][0])
lowest_point = int(bottommost[1])
highest_point = int(topmost[1])
rect_center_heigth = int(np.round((lowest_point + highest_point) / 2, 0))
rect_size = lowest_point - highest_point
# bot_point = cv2.circle(green_bar_win, (topmost[0] + x_center_calibration_value, lowest_point), 1, (255, 255, 0), 4) # very useful to know where the bottom point is being found
# top_point = cv2.circle(green_bar_win, (topmost[0] + x_center_calibration_value, highest_point), 1, (255, 255, 0), 4) # very useful to know where the top point is being found
# center_point = cv2.circle(green_bar_win, (topmost[0] + x_center_calibration_value, rect_center_heigth), 1, (255, 0, 255), 2) # Draws magenta point aroud center
return {"Found": True, "Center Height": rect_center_heigth, "Lowest point": lowest_point,
"Highest Point": highest_point, "Rect Size": rect_size, "Fish Inside": False}
# if it find 2 rectangles (which happens when the fish is in the middle of the bar)
elif len(cnt_list) == 2:
# bottom rect
cnt1 = cnt_list[0]
# top rect
cnt2 = cnt_list[1]
# the top-most point of the top rect
topmost = tuple(cnt2[cnt2[:, :, 1].argmin()][0])
# the bottom-most point of the bottom rect
bottommost = tuple(cnt1[cnt1[:, :, 1].argmax()][0])
lowest_point = int(bottommost[1])
highest_point = int(topmost[1])
rect_center_heigth = int(np.round((lowest_point + highest_point) / 2, 0))
rect_size = lowest_point - highest_point
# bot_point = cv2.circle(green_bar_win, (topmost[0] + x_center_calibration_value, lowest_point), 1, (255, 255, 0), 4) # very useful to know where the bottom point is being found
# top_point = cv2.circle(green_bar_win, (topmost[0] + x_center_calibration_value, highest_point), 1, (255, 255, 0), 4) # very useful to know where the top point is being found
# center_point = cv2.circle(green_bar_win, (topmost[0] + x_center_calibration_value, rect_center_heigth), 1, (255, 0, 255), 2) # Draws magenta point aroud center
return {"Found": True, "Center Height": rect_center_heigth, "Lowest point": lowest_point,
"Highest Point": highest_point, "Rect Size": rect_size, "Fish Inside": True}
# return 'img_green' to see what the script is seeing when finding contours
return {"Found": False}
def find_fish(green_bar_win) -> dict:
"""
    Image parameter needs to be in BGR
"""
    # If no fish were found in the image, this would be used as the default height; 400 is the bottom of the mini-game because point (0, 0) is the top-left corner
# fish_center_height = 400
# fish_x_calibration = 0 # 58
img_HSV = cv2.cvtColor(green_bar_win, cv2.COLOR_RGB2HSV)
img_fish = cv2.inRange(img_HSV, lowerBound_fish, upperBound_fish)
_, conts, hierarchy = cv2.findContours(img_fish, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in conts:
area = cv2.contourArea(cnt)
if area > 25:
# (x, y), radius = cv2.minEnclosingCircle(cnt)
# fish_center_point = (int(x), int(y))
# fish_center_height = fish_center_point[1]
# radius = int(radius)
# fish_center_point = cv2.circle(green_bar_win, fish_center_point, 15, (100, 0, 255), 2)
# x1, y1, w, h = cv2.boundingRect(cnt)
# x2 = x1 + w
# y2 = y1 + h
# cv2.rectangle(green_bar_win, (x1, y1), (x2, y2), (255, 255, 255), 2)
topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])
lowest_point = int(bottommost[1])
highest_point = int(topmost[1])
fish_center_height = int(np.round((lowest_point + highest_point) / 2, 0))
# cv2.circle(green_bar_win, (10, fish_center_height), 2, (100, 0, 255), 2)
return {"Found": True, "Center Height": fish_center_height, "Lowest Point": lowest_point,
"Highest Point": highest_point}
return {"Found": False}
def find_chest(green_bar_win) -> dict:
img_chest = cv2.cvtColor(green_bar_win, cv2.COLOR_BGRA2BGR)
img_chest = cv2.inRange(img_chest, lowerBound_chest, upperBound_chest)
_, conts, hierarchy = cv2.findContours(img_chest, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if not conts:
return {"Found": False}
else:
return {"Found": True}
def verify_too_similar_frames(training_data) -> bool:
"""
Verifies if the button was clicked at least once and verifies if the captured frames are too similar too one another
"""
# region ONLY FOR TESTING TODO: Remove
final_result = []
# endregion
# Stripping clicks from frames
session_imgs = [data[0] for data in training_data]
session_clicks = [data[1] for data in training_data]
# Checking if the session was active enough:
n_clicks = 0
for click in session_clicks:
if click == 1:
n_clicks += 1
active_ratio = round(100 * n_clicks / len(session_clicks), 2)
# TODO: Decide which percent should be minimum
if active_ratio <= 10:
print("Session not active enough. Deleting save.")
# region ONLY FOR TEST. TODO: Replace with "return False"
final_result.append(False)
# endregion
# Checking if there are enough different frames
# Making a list of 1 and 0, with 1 meaning this frame and the next one are equal and 0 meaning that this frame and the next one
# are different.
result = []
for i, frame in enumerate(session_imgs):
try:
next_frame = session_imgs[i + 1]
equal = int(np.array_equal(frame, next_frame))
result.append(equal)
except IndexError:
break
# Counting the number of equal frames (ones) and different frames (zeros)
equals = result.count(1)
diffs = result.count(0)
try:
equality_ratio = round(100 * equals / (equals + diffs), 2)
except ZeroDivisionError:
equality_ratio = 100
# TODO: test this constant (same as above?)
if equality_ratio > 25:
print("There are too many similar frames in this session. Deleting.")
# region ONLY FOR TEST. TODO: Replace with "return False"
final_result.append(False)
# endregion
# region ONLY FOR TEST. TODO: Replace with "return True"
if os.path.isfile('Data\\constant_control.pkl'):
df = pd.read_pickle('Data\\constant_control.pkl')
        new_df = pd.DataFrame([[equality_ratio, active_ratio]], columns=['Equality Ratio', 'Activeness Ratio'])
"""Download data from GapMinder
countries.csv - last available info for most features
cze.csv - Czechia features evolution 1993-2013
"""
import pandas as pd
import gapminderdata as gmd
def run():
# Get country metadata
countries = gmd.read_countries()
countries = countries[country_columns].copy()
countries["is_eu"] = countries["name"].isin(eu.keys())
assert (
countries["is_eu"].sum() == 28
), f"Only {countries['is_eu'].sum()} countries in EU."
countries["is_oecd"] = countries["name"].isin(oecd)
assert (
countries["is_oecd"].sum() == 36
), f"Only {countries['is_oecd'].sum()} countries in OECD: {list(countries.query('is_oecd').index)}"
countries["eu_accession"] = pd.to_datetime(
countries["name"].apply(lambda n: eu.get(n, None))
)
# Get data points
data = gmd.read_columns(data_columns)
data = (
countries.merge(data.reset_index(), right_on="geo", left_index=True)
.reset_index(drop=True)
.sort_values(["name", "time"])
)
# Rename
data = data.rename(rename_columns, axis=1)
gap_data = (
data[data["year"] < 2019]
.dropna(thresh=10)
.sort_values("year", ascending=False)
.groupby("name")
.first()
.drop(["geo"], axis=1)
# .drop(["iso3166_1_alpha3", "geo"], axis=1)
.reset_index()
)
# Rename to "canonical" form
gap_data["name"] = gap_data["name"].replace(rename_countries)
# Limit to UN
un_data = pd.read_csv("data_external/un.csv")
un_data["un_accession"] = pd.to_datetime(un_data["un_accession"])
    gap_data = pd.merge(gap_data, un_data, how="right", on="name")
import io
import textwrap
from collections import namedtuple
import numpy as np
import pandas as pd
import statsmodels.api as sm
from estimagic.config import EXAMPLE_DIR
from estimagic.visualization.estimation_table import _convert_model_to_series
from estimagic.visualization.estimation_table import _create_statistics_sr
from estimagic.visualization.estimation_table import _process_body_df
from estimagic.visualization.estimation_table import _process_model
from estimagic.visualization.estimation_table import estimation_table
from pandas.testing import assert_frame_equal as afe
from pandas.testing import assert_series_equal as ase
# test process_model for different model types
NamedTup = namedtuple("NamedTup", "params info")
fix_path = EXAMPLE_DIR / "diabetes.csv"
df_ = pd.read_csv(fix_path, index_col=0)
est = sm.OLS(endog=df_["target"], exog=sm.add_constant(df_[df_.columns[0:4]])).fit()
def test_estimation_table():
models = [est]
return_type = "python"
res = estimation_table(models, return_type, append_notes=False)
exp = {}
body_str = """
index,{(1)}
const,152.13$^{*** }$
,(2.85)
Age,37.24$^{ }$
,(64.12)
Sex,-106.58$^{* }$
,(62.13)
BMI,787.18$^{*** }$
,(65.42)
ABP,416.67$^{*** }$
,(69.49)
"""
exp["body_df"] = _read_csv_string(body_str).fillna("")
exp["body_df"].set_index("index", inplace=True)
footer_str = """
,{(1)}
Observations,442.0
R$^2$,0.4
Adj. R$^2$,0.39
Residual Std. Error,59.98
F Statistic,72.91$^{***}$
"""
exp["footer_df"] = _read_csv_string(footer_str).fillna("")
exp["footer_df"].set_index(" ", inplace=True)
exp["footer_df"].index.names = [None]
exp["footer_df"].index = pd.MultiIndex.from_arrays([exp["footer_df"].index])
exp["notes_tex"] = "\\midrule\n"
exp[
"notes_html"
] = """<tr><td colspan="2" style="border-bottom: 1px solid black">
</td></tr>"""
afe(exp["footer_df"], res["footer_df"])
afe(exp["body_df"], res["body_df"], check_index_type=False)
ase(pd.Series(exp["notes_html"]), pd.Series(res["notes_html"]))
ase(pd.Series(exp["notes_tex"]), pd.Series(res["notes_tex"]))
def test_process_model_namedtuple():
# checks that process_model doesn't alter values
df = pd.DataFrame(columns=["value", "p_value", "ci_lower", "ci_upper"])
df["value"] = np.arange(10)
df["p_value"] = np.arange(10)
df["ci_lower"] = np.arange(10)
df["ci_upper"] = np.arange(10)
info = {"stat1": 0, "stat2": 0}
model = NamedTup(params=df, info=info)
res = _process_model(model)
afe(res.params, df)
ase(pd.Series(res.info), pd.Series(info))
def test_process_model_stats_model():
par_df = pd.DataFrame(
columns=["value", "p_value", "standard_error", "ci_lower", "ci_upper"],
index=["const", "Age", "Sex", "BMI", "ABP"],
)
par_df["value"] = [152.133484, 37.241211, -106.577520, 787.179313, 416.673772]
par_df["p_value"] = [
2.048808e-193,
5.616557e-01,
8.695658e-02,
5.345260e-29,
4.245663e-09,
]
par_df["standard_error"] = [2.852749, 64.117433, 62.125062, 65.424126, 69.494666]
par_df["ci_lower"] = [146.526671, -88.775663, -228.678572, 658.594255, 280.088446]
par_df["ci_upper"] = [157.740298, 163.258084, 15.523532, 915.764371, 553.259097]
info_dict = {}
info_dict["rsquared"] = 0.40026108237714
info_dict["rsquared_adj"] = 0.39477148130050055
info_dict["fvalue"] = 72.91259907398705
info_dict["f_pvalue"] = 2.700722880950139e-47
info_dict["df_model"] = 4.0
info_dict["df_resid"] = 437.0
info_dict["dependent_variable"] = "target"
info_dict["resid_std_err"] = 59.97560860753488
info_dict["n_obs"] = 442.0
res = _process_model(est)
afe(res.params, par_df)
ase(pd.Series(res.info), pd.Series(info_dict))
def test_process_model_dict():
df = pd.DataFrame(columns=["value", "p_value", "standard_error"])
df["value"] = np.arange(10)
df["p_value"] = np.arange(10)
df["standard_error"] = np.arange(10)
info = {"stat1": 0, "stat2": 0}
mod = {}
mod["params"] = df
mod["info"] = info
res = _process_model(mod)
afe(res.params, mod["params"])
    ase(pd.Series(res.info), pd.Series(info))
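# Note: _read_csv_string is used by the tests above but its definition falls outside this excerpt.
# A minimal sketch consistent with the otherwise-unused io/textwrap imports:
def _read_csv_string(string, index_cols=None):
    # Strip the common indentation of the triple-quoted CSV and parse it into a DataFrame
    string = textwrap.dedent(string)
    return pd.read_csv(io.StringIO(string), index_col=index_cols)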
import os
os.chdir('STARmap_AllenVISp/')
import numpy as np
import pandas as pd
import pickle
import matplotlib
matplotlib.use('qt5agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import scipy.stats as st
from sklearn.neighbors import NearestNeighbors
from matplotlib import cm
with open ('data/SpaGE_pkl/Starmap.pkl', 'rb') as f:
datadict = pickle.load(f)
coords = datadict['coords']
Starmap_data = datadict['Starmap_data']
del datadict
with open ('data/SpaGE_pkl/Allen_VISp.pkl', 'rb') as f:
datadict = pickle.load(f)
RNA_data = datadict['RNA_data']
del datadict
all_centroids = np.vstack([c.mean(0) for c in coords])
plt.style.use('dark_background')
cmap = cm.get_cmap('viridis',20)
def Moran_I(SpatialData,XYmap):
XYnbrs = NearestNeighbors(n_neighbors=5, algorithm='auto',metric = 'euclidean').fit(XYmap)
XYdistances, XYindices = XYnbrs.kneighbors(XYmap)
W = np.zeros((SpatialData.shape[0],SpatialData.shape[0]))
for i in range(0,SpatialData.shape[0]):
W[i,XYindices[i,:]]=1
for i in range(0,SpatialData.shape[0]):
W[i,i]=0
I = pd.Series(index=SpatialData.columns)
for k in SpatialData.columns:
X_minus_mean = np.array(SpatialData[k] - np.mean(SpatialData[k]))
X_minus_mean = np.reshape(X_minus_mean,(len(X_minus_mean),1))
Nom = np.sum(np.multiply(W,np.matmul(X_minus_mean,X_minus_mean.T)))
Den = np.sum(np.multiply(X_minus_mean,X_minus_mean))
I[k] = (len(SpatialData[k])/np.sum(W))*(Nom/Den)
return(I)
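# Interpretation note: each value is Moran's I of a gene's expression over the 5-nearest-neighbour
# graph of cell centroids; values near +1 indicate strong positive spatial autocorrelation, while
# values near 0 indicate a spatially random expression pattern.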
Moran_Is = Moran_I(Starmap_data,all_centroids)
Gene_Order = np.intersect1d(Starmap_data.columns,RNA_data.columns)
Moran_Is = Moran_Is[Gene_Order]
### SpaGE
SpaGE_imputed = pd.read_csv('Results/SpaGE_LeaveOneOut.csv',header=0,index_col=0,sep=',')
SpaGE_imputed = SpaGE_imputed.loc[:,Gene_Order]
SpaGE_Corr = pd.Series(index = Gene_Order)
for i in Gene_Order:
SpaGE_Corr[i] = st.spearmanr(Starmap_data[i],SpaGE_imputed[i])[0]
### gimVI
gimVI_imputed = pd.read_csv('Results/gimVI_LeaveOneOut.csv',header=0,index_col=0,sep=',')
gimVI_imputed.columns = Gene_Order
gimVI_Corr = pd.Series(index = Gene_Order)
for i in Gene_Order:
gimVI_Corr[i] = st.spearmanr(Starmap_data[i],gimVI_imputed[i])[0]
gimVI_Corr[np.isnan(gimVI_Corr)] = 0
### Seurat
Seurat_imputed = pd.read_csv('Results/Seurat_LeaveOneOut.csv',header=0,index_col=0,sep=',')
""" Fred View """
__docformat__ = "numpy"
from typing import List, Tuple
import fred
import pandas as pd
from fredapi import Fred
from gamestonk_terminal import config_terminal as cfg
def get_series_notes(series_term: str, num: int) -> str:
"""Get Series notes. [Source: FRED]
Parameters
----------
series_term : str
Search for this series term
num : int
Maximum number of series notes to display
Returns
----------
notes : str
Series notes output
"""
fred.key(cfg.API_FRED_KEY)
d_series = fred.search(series_term)
if "seriess" not in d_series:
return "No Series found using this term!\n"
df_fred = pd.DataFrame(d_series["seriess"])
if df_fred.empty:
return "No Series found using this term!\n"
df_fred = df_fred.sort_values(by=["popularity"], ascending=False).head(num)
notes = ""
for _, series in df_fred.iterrows():
if series["notes"]:
notes += series["id"] + "\n"
notes += "-" * len(series["id"]) + "\n"
notes += series["notes"] + "\n\n"
if not notes:
return "Series notes not found!\n"
return notes
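# Example usage (assumes a valid FRED API key is configured):
#   print(get_series_notes("unemployment", num=3))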
def get_series_ids(series_term: str, num: int) -> Tuple[List[str], List[str]]:
"""Get Series IDs. [Source: FRED]
Parameters
----------
series_term : str
Search for this series term
num : int
Maximum number of series IDs to output
Returns
----------
List[str]
List of series IDs
List[str]
List of series Titles
"""
fred.key(cfg.API_FRED_KEY)
d_series = fred.search(series_term)
if "seriess" not in d_series:
return [], []
if not d_series["seriess"]:
return [], []
    df_series = pd.DataFrame(d_series["seriess"])
    df_series = df_series.sort_values(by=["popularity"], ascending=False).head(num)
    return df_series["id"].to_list(), df_series["title"].to_list()
"""
concavity_automator comports multiple scripts automating concavity constraining method for landscape
"""
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import math
from lsdtopytools.numba_tools import travelling_salesman_algortihm, remove_outliers_in_drainage_divide
import random
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, current_process
from scipy import spatial,stats
import numba as nb
import copy
from pathlib import Path
import pylab as pl
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
def norm_by_row(A):
"""
Subfunction used to vectorised normalisation of disorder by max of row using apply_along_axis function
B.G
"""
return A/A.max()
def norm_by_row_by_range(A):
"""
Subfunction used to vectorised normalisation of disorder by range of concavity using apply_along_axis function
B.G
"""
return (A - A.min())/(A.max() - A.min())
def numfmt(x, pos):
"""
Plotting subfunction to automate tick formatting from metres to kilometres
B.G
"""
s = '{:d}'.format(int(round(x / 1000.0)))
return s
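# Intended use (assumption): wrap in a matplotlib FuncFormatter so axes expressed in metres are
# labelled in kilometres, e.g.
#   import matplotlib.ticker as tkr
#   ax.xaxis.set_major_formatter(tkr.FuncFormatter(numfmt))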
def get_best_bit_and_err_from_Dstar(thetas, medD, fstD, thdD):
"""
Takes ouput from concavity calculation to calculate the best-fit theta and its error
"""
# Calculating the index of minimum medium disorder to get the best-fit
index_of_BF = np.argmin(medD)
# Getting the Dstar value of the best-fit
dstar_val = medD[index_of_BF]
# Getting the acutal best-fit
BF = thetas[index_of_BF]
# Preformatting 2 arrays for calculating the error: I am just interested by the first half for the first error and the second for the second
A = np.copy(fstD)
A[index_of_BF+1:] = 9999
B = np.copy(fstD)
B[:index_of_BF] = 9999
# calculating the error by extracting the closest theta with a Dstar close to the median best fit ones
err = ( thetas[np.abs(A - dstar_val).argmin()] , thetas[np.abs(B - dstar_val).argmin()] )
# REturning a tuple with [0] being the best fit and [1] another tuple f error
return BF,err
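# Usage sketch (hypothetical arrays): thetas is the grid of tested concavities and medD/fstD/thdD the
# median and quantile D* curves from the disorder analysis:
#   best_theta, (err_low, err_high) = get_best_bit_and_err_from_Dstar(thetas, medD, fstD, thdD)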
def process_basin(ls, **kwargs):
"""
Main function processing the concavity. It looks a bit convoluted but it is required for clean multiprocessing.
Takes at least one argument: ls, which is a list of arguments
ls[0] -> the number of the basin (heavily used by automatic multiprocessing)
ls[1] -> the X coordinate of the basin outlet
ls[2] -> the Y coordinate of the basin outlet
ls[3] -> area_threshold used for the analysis
ls[4] -> prefix before the basin number, used to read the input file
Also takes optional kwargs arguments:
ignore_numbering: just use the prefix as the name for the DEM
extension: if your extension is not .tif, you can give it here WITHOUT THE DOT
overwrite_dem_name: used if you want to use the function from outside the automations: you need to provide the dem name WITH THE EXTENSION
"""
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
print("Processing basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if("extension" not in kwargs):
kwargs["extension"] = "tif"
if("n_tribs_by_combo" not in kwargs):
kwargs["n_tribs_by_combo"] = 4
if(kwargs["ignore_numbering"] == True):
name = prefix
else:
name = prefix + "%s"%(number)
if(kwargs["precipitation_raster"] == ""):
precipitation = False
else:
precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
dem_name ="%s.%s"%(name,kwargs["extension"])
if("overwrite_dem_name" in kwargs):
dem_name = kwargs["overwrite_dem_name"]
MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# Extracting basins
if(precipitation):
MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
else:
MD.CommonFlowRoutines()
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
print("River extracted")
MD.DefineCatchment( method="from_XY", X_coords = [X], Y_coords = [Y], coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels")
MD.df_base_river.to_feather("%s_rivers.feather"%(name))
print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, kwargs["n_tribs_by_combo"])
print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
np.save("%s_disorder_tot.npy"%(name), results)
XY = MD.cppdem.query_xy_for_each_basin()["0"]
tdf = pd.DataFrame(XY)
tdf.to_feather("%s_XY.feather"%(name))
return 0
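# Hypothetical driver sketch for process_basin: each entry of params follows the
# [number, X, Y, area_threshold, prefix] convention described in the docstring above.
# The outlet coordinates, threshold and prefix are placeholders; precipitation_raster = ""
# disables the precipitation routing. Example only, never invoked by this module.
def _example_process_basins_in_parallel():
    params = [
        [0, 532000.0, 4210000.0, 1500, "my_dem_"],
        [1, 548000.0, 4230000.0, 1500, "my_dem_"],
    ]
    with Pool(2) as p:
        jobs = [p.apply_async(process_basin, args = (ls,), kwds = {"precipitation_raster": ""}) for ls in params]
        for job in jobs:
            job.wait()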
def theta_quick_constrain_single_basin(MD,X_coordinate_outlet = 0, Y_coordinate_outlet = 0, area_threshold = 1500):
"""
Quick, interactive version of the concavity-constraining routine for a single basin.
Arguments:
MD -> an lsdtopytools LSDDEM object, already preprocessed and with its flow routines run
X_coordinate_outlet, Y_coordinate_outlet -> coordinates of the basin outlet
area_threshold -> minimum drainage-area threshold used to extract the river network
Returns the best-fit concavities per tributary combination obtained with the disorder method.
"""
# number = ls[0]
# X = ls[1]
# Y = ls[2]
# area_threshold = ls[3]
# prefix = ls[4]
# print("Processing basin ", number, " with proc ", current_process())
# if("ignore_numbering" not in kwargs):
# kwargs["ignore_numbering"] = False
# if("extension" not in kwargs):
# kwargs["extension"] = "tif"
# if("n_tribs_by_combo" not in kwargs):
# kwargs["n_tribs_by_combo"] = 4
# if(kwargs["ignore_numbering"] == True):
# name = prefix
# else:
# name = prefix + "%s"%(number)
# if(kwargs["precipitation_raster"] == ""):
# precipitation = False
# else:
# precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
# dem_name ="%s.%s"%(name,kwargs["extension"])
# if("overwrite_dem_name" in kwargs):
# dem_name = kwargs["overwrite_dem_name"]
# MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# # Extracting basins
# if(precipitation):
# MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
# else:
# MD.CommonFlowRoutines()
# print("Experimental function (Gailleton et al., submitted), if it crashes restart from a clean LSDDEM object with only the flow routines processed.")
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# print("River pre-extracted")
MD.DefineCatchment( method="from_XY", X_coords = X_coordinate_outlet, Y_coords = Y_coordinate_outlet, coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
# print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
# print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("DEBUG::You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels \n")
# MD.df_base_river.to_feather("%s_rivers.feather"%(name))
# print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, 4)
# print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
# pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
# np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
# np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
# print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
# np.save("%s_disorder_tot.npy"%(name), results)
# XY = MD.cppdem.query_xy_for_each_basin()["0"]
# tdf = pd.DataFrame(XY)
# tdf.to_feather("%s_XY.feather"%(name))
# print("\n\n")
try:
from IPython.display import display, Markdown, Latex
todusplay = r"""
**Thanks for constraining** $\theta$ with the disorder algorithm from _Mudd et al., 2018_ and _Gailleton et al, submitted_.
Keep in mind that it is not straightforward and that the "best fit" we suggest is most of the time the "least worst" value maximising the collinearity in $\chi$ space.
Especially in large, complex basins, several $\theta$ values actually fit different areas, and the best fit is just an attempt to satisfy all of them, which is not always possible.
$\theta$ constraining results:
median $\theta$ | $1^{st}$ Q | $3^{rd}$ Q
--- | --- | ---
%s | %s | %s
"""%(round(np.nanmedian(all_disorder[0]),3), round(np.nanpercentile(all_disorder[0],25),3), round(np.nanpercentile(all_disorder[0],75),3))
display(Markdown(todusplay))
except:
pass
return all_disorder
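# Hypothetical usage sketch: the DEM file name and outlet coordinates are placeholders, and the
# coordinates are passed as single-element lists to mirror how DefineCatchment is called in
# process_basin above. The LSDDEM must already be preprocessed and flow-routed. Example only,
# never invoked by this module.
def _example_quick_constrain():
    MD = lsd.LSDDEM(file_name = "my_dem.tif", already_preprocessed = True)
    MD.CommonFlowRoutines()
    all_disorder = theta_quick_constrain_single_basin(MD, X_coordinate_outlet = [532000.0],
        Y_coordinate_outlet = [4210000.0], area_threshold = 1500)
    return np.nanmedian(all_disorder[0])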
def get_median_first_quartile_Dstar(ls):
"""
Function which post-processes the results from one analysis to return the median and first-quartile curves of the max-normalised disorder (D*)
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D* for ", ls)
name_to_load = ls
# loading the file containing ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def get_median_first_quartile_Dstar_r(ls):
"""
Function which post-processes the results from one analysis to return the median and first-quartile curves of the range-normalised disorder (D*_r)
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D*_r for ", ls)
name_to_load = ls
# loading the file containing ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row_by_range,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def plot_single_theta(ls, **kwargs):
"""
For a multiple-basin analysis on the same DEM, this plots the global map with each basin coloured by D^* at the given theta.
Needs the post-processing function to have pre-analysed the outputs.
The layout of this function might seem a bit convoluted, but it makes multiprocessing easy, as these maps take time to plot.
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
print("plotting D*_r for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_r_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_by_range_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def plot_min_D_star_map(ls, **kwargs):
"""
For a multiple-basin analysis on the same DEM, this plots the global map with each basin coloured by its minimum D^* across all tested theta values.
Needs the post-processing function to have pre-analysed the outputs.
The layout of this function might seem a bit convoluted, but it makes multiprocessing easy, as these maps take time to plot.
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
df_theta = pd.read_csv(prefix + "all_raster_names.csv")
thetas = np.round(pd.read_feather(df["raster_name"].iloc[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = 1e12
for tval in thetas:
valtest = df["D*_%s"%tval][df["raster_name"] == name].values[0] # A wee bit convoluted, but it works and it is fast
if(valtest<val):
val=valtest
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_minimum_disorder_across_theta_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def post_process_analysis_for_Dstar(prefix, n_proc = 1, base_raster_full_name = "SEC_PP.tif"):
# Loading the list of raster
df = pd.read_csv(prefix + "all_raster_names.csv")
# Preparing the multiprocessing
d_of_med = {}
d_of_fst = {}
d_of_med_r = {}
d_of_fst_r = {}
params = df["raster_name"].tolist()
ras_to_ignore = {}
ras_to_ignore_list = []
for i in params:
ras_to_ignore[i] = False
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med[gut.get()[2]] = gut.get()[0]
d_of_fst[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar_r, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med_r[gut.get()[2]] = gut.get()[0]
d_of_fst_r[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# Getting the list of thetas tested
thetas = np.round(pd.read_feather(params[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
df["best_fit"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["best_fit_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Preparing my dataframe to ingest
for t in thetas:
df["D*_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["D*_r_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Ingesting the results
for i in range(df.shape[0]):
if(ras_to_ignore[df["raster_name"].iloc[i]]):
continue
BF,err = get_best_bit_and_err_from_Dstar(thetas, d_of_med[df["raster_name"].iloc[i]], d_of_fst[df["raster_name"].iloc[i]], 10)
BF_r,err_r = get_best_bit_and_err_from_Dstar(thetas, d_of_med_r[df["raster_name"].iloc[i]], d_of_fst_r[df["raster_name"].iloc[i]], 10)
df["best_fit"].iloc[i] = BF
df["err_neg"].iloc[i] = err[0]
df["err_pos"].iloc[i] = err[1]
df["best_fit_norm_by_range"].iloc[i] = BF_r
df["err_neg_norm_by_range"].iloc[i] = err_r[0]
df["err_pos_norm_by_range"].iloc[i] = err_r[1]
for t in range(thetas.shape[0]):
df["D*_%s"%thetas[t]].iloc[i] = d_of_med[df["raster_name"].iloc[i]][t]
df["D*_r_%s"%thetas[t]].iloc[i] = d_of_med_r[df["raster_name"].iloc[i]][t]
# Getting the hillshade
mydem = lsd.LSDDEM(file_name = base_raster_full_name,already_preprocessed = True)
HS = mydem.get_hillshade(altitude = 45, angle = 315, z_exageration = 1)
mydem.save_array_to_raster_extent( HS, name = prefix + "HS", save_directory = "./")
# will add X-Y to the summary dataframe
df["X_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# I do not multiprocess here: it would require loading the mother raster for each process and would eat a lot of memory
for i in params:
if(ras_to_ignore[i]):
continue
XY = pd.read_feather(i + "_XY.feather")
row,col = mydem.cppdem.query_rowcol_from_xy(XY["X"].values, XY["Y"].values)
np.save(i + "_row.npy", row)
np.save(i + "_col.npy", col)
df["X_median"][df["raster_name"] == i] = XY["X"].median()
df["X_firstQ"][df["raster_name"] == i] = XY["X"].quantile(0.25)
df["X_thirdtQ"][df["raster_name"] == i] = XY["X"].quantile(0.75)
df["Y_median"][df["raster_name"] == i] = XY["Y"].median()
df["Y_firstQ"][df["raster_name"] == i] = XY["Y"].quantile(0.25)
df["Y_thirdtQ"][df["raster_name"] == i] = XY["Y"].quantile(0.75)
#Removing the unwanted
df = df[~df["raster_name"].isin(ras_to_ignore_list)]
# Saving the DataFrame
df.to_csv(prefix +"summary_results.csv", index = False)
print("Done with the post processing")
def plot_main_figures(prefix, **kwargs):
# Loading the list of raster
dfrast = | pd.read_csv(prefix + "all_raster_names.csv") | pandas.read_csv |
from torchtools import *
from collections import OrderedDict
import math
import os
import numpy as np
import pandas as pd
# encoder for imagenet dataset
class EmbeddingImagenet(nn.Module):
def __init__(self,
emb_size):
super(EmbeddingImagenet, self).__init__()
# set size
self.hidden = 64
self.last_hidden = self.hidden * 25
self.emb_size = emb_size
# set layers
self.conv_1 = nn.Sequential(nn.Conv2d(in_channels=3,
out_channels=self.hidden,
kernel_size=3,
padding=1,
bias=False),
nn.BatchNorm2d(num_features=self.hidden),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv_2 = nn.Sequential(nn.Conv2d(in_channels=self.hidden,
out_channels=int(self.hidden*1.5),
kernel_size=3,
bias=False),
nn.BatchNorm2d(num_features=int(self.hidden*1.5)),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv_3 = nn.Sequential(nn.Conv2d(in_channels=int(self.hidden*1.5),
out_channels=self.hidden*2,
kernel_size=3,
padding=1,
bias=False),
nn.BatchNorm2d(num_features=self.hidden * 2),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(0.4))
self.conv_4 = nn.Sequential(nn.Conv2d(in_channels=self.hidden*2,
out_channels=self.hidden*4,
kernel_size=3,
padding=1,
bias=False),
nn.BatchNorm2d(num_features=self.hidden * 4),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(0.5))
self.layer_last = nn.Sequential(nn.Linear(in_features=self.last_hidden * 4,
out_features=self.emb_size, bias=True),
nn.BatchNorm1d(self.emb_size))
def forward(self, input_data):
output_data = self.conv_4(self.conv_3(self.conv_2(self.conv_1(input_data))))
return self.layer_last(output_data.view(output_data.size(0), -1))
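# Hypothetical sanity-check sketch: the final linear layer expects last_hidden * 4 = 6400 inputs,
# which corresponds to 84x84 images (spatial maps of 84 -> 42 -> 20 -> 10 -> 5 with 256 channels).
# The batch size and emb_size below are arbitrary. Example only, never invoked by this module.
def _example_embedding_forward():
    encoder = EmbeddingImagenet(emb_size=128)
    dummy_images = torch.randn(8, 3, 84, 84)   # fake batch of 8 RGB 84x84 images
    embeddings = encoder(dummy_images)         # expected shape: [8, 128]
    return embeddings.shape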
class GraphUnpool(nn.Module):
def __init__(self):
super(GraphUnpool, self).__init__()
def forward(self, A, X, idx_batch):
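        # Scatter the pooled node features X back to their pre-pooling node slots (given by
        # idx_batch) inside a zero tensor sized to the un-pooled graph; A is returned unchanged.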
# optimized by Gai
batch = X.shape[0]
new_X = torch.zeros(batch, A.shape[1], X.shape[-1]).to(tt.arg.device)
new_X[torch.arange(idx_batch.shape[0]).unsqueeze(-1), idx_batch] = X
#
return A, new_X
class GraphPool(nn.Module):
def __init__(self, k, in_dim, num_classes, num_queries):
super(GraphPool, self).__init__()
self.k = k
self.num_queries = num_queries
self.num_classes = num_classes
self.proj = nn.Linear(in_dim, 1).to(tt.arg.device)
self.sigmoid = nn.Sigmoid()
def forward(self, A, X):
batch = X.shape[0]
idx_batch = []
new_X_batch = []
new_A_batch = []
if tt.arg.visual == True:
if tt.arg.pool_count == None:
tt.arg.pool_count = 0
# for each batch
for i in range(batch):
num_nodes = A[i, 0].shape[0]
scores = self.proj(X[i])
scores = torch.squeeze(scores)
scores = self.sigmoid(scores / 100)
#visual scores
if tt.arg.visual == True:
np_scores = scores.detach().cpu().numpy()
if tt.arg.pool_count == 0:
np_idx = np.arange(scores.size(0))
data = [['idx_%d' % tt.arg.pool_count]+list(np_idx),
['pool_%d_scores' % tt.arg.pool_count] + list(np_scores)]
else:
data = [['pool_%d_scores' % tt.arg.pool_count] + list(np_scores)]
df = pd.DataFrame(data)
if tt.arg.pool_count == 0:
if os.path.exists('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i)):
os.remove('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i))
df.to_csv('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter*batch + i),header=False,index=False,mode='a')
if tt.arg.pool_mode == 'way':
num_spports = int((num_nodes - self.num_queries) / self.num_classes)
idx = []
values = []
# pooling by each way
for j in range(self.num_classes):
way_values, way_idx = torch.topk(scores[j * num_spports:(j + 1) * num_spports],
int(self.k * num_spports))
way_idx = way_idx + j * num_spports
idx.append(way_idx)
values.append(way_values)
query_values = scores[num_nodes - self.num_queries:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat(values + [query_values], dim=0)
idx = torch.cat(idx + [query_idx], dim=0)
elif tt.arg.pool_mode == 'support':
num_supports = num_nodes - self.num_queries
support_values, support_idx = torch.topk(scores[:num_supports], int(self.k * num_supports),
largest=True)
query_values = scores[num_supports:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat([support_values, query_values], dim=0)
idx = torch.cat([support_idx, query_idx], dim=0)
elif tt.arg.pool_mode == 'way&kn':
num_supports = int((num_nodes - self.num_queries) / self.num_classes)
idx = []
values = []
# pooling by each way
for j in range(self.num_classes):
way_scores = scores[j * num_supports:(j + 1) * num_supports]
intra_scores = way_scores - way_scores.mean()
_, way_idx = torch.topk(intra_scores,
int(self.k * num_supports), largest=False)
way_values = way_scores[way_idx]
way_idx = way_idx + j * num_supports
idx.append(way_idx)
values.append(way_values)
query_values = scores[num_nodes - self.num_queries:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat(values + [query_values], dim=0)
idx = torch.cat(idx + [query_idx], dim=0)
elif tt.arg.pool_mode == 'kn':
num_supports = num_nodes - self.num_queries
support_scores = scores[:num_supports]
intra_scores = support_scores - support_scores.mean()
_, support_idx = torch.topk(intra_scores,
int(self.k * num_supports), largest=False)
support_values = support_scores[support_idx]
query_values = scores[num_nodes - self.num_queries:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat([support_values, query_values], dim=0)
idx = torch.cat([support_idx, query_idx], dim=0)
else:
print('wrong pool_mode setting!!!')
raise NameError('wrong pool_mode setting!!!')
new_X = X[i, idx, :]
values = torch.unsqueeze(values, -1)
new_X = torch.mul(new_X, values)
new_A = A[i, idx, :]
new_A = new_A[:, idx]
idx_batch.append(idx)
new_X_batch.append(new_X)
new_A_batch.append(new_A)
A = torch.stack(new_A_batch, dim=0).to(tt.arg.device)
new_X = torch.stack(new_X_batch, dim=0).to(tt.arg.device)
idx_batch = torch.stack(idx_batch, dim=0).to(tt.arg.device)
# visual pool idx result
if tt.arg.visual == True:
for i in range(batch):
old_idx = pd.read_csv('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i),
skiprows=tt.arg.pool_count*2,nrows=1,header=None).to_numpy(copy=True).reshape(-1)[1:].astype(np.int32)
np_idx = old_idx[idx_batch[i].cpu().numpy()]
data = [['idx_%d' % (tt.arg.pool_count+1)] + list(np_idx)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i), header=False,
index=False, mode='a')
tt.arg.pool_count = tt.arg.pool_count + 1
return A, new_X, idx_batch
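# Hypothetical sketch of one pooling step outside the full Unet. The tt.arg fields read by
# GraphPool (device, visual, pool_mode) are normally configured by the training script; the
# 5-way / 5-query sizes and random tensors below are placeholders. Example only, never invoked
# by this module.
def _example_graph_pool_step():
    tt.arg.device = 'cpu'
    tt.arg.visual = False
    tt.arg.pool_mode = 'support'
    num_classes, num_queries, in_dim = 5, 5, 96
    num_nodes = num_classes * 1 + num_queries            # 5-way 1-shot supports + queries
    A = torch.rand(2, num_nodes, num_nodes)              # batch of 2 dense adjacency matrices
    X = torch.rand(2, num_nodes, in_dim)                 # node features
    pool = GraphPool(k=0.6, in_dim=in_dim, num_classes=num_classes, num_queries=num_queries)
    A_new, X_new, idx_batch = pool(A, X)                 # keeps the top-60% supports plus all queries
    return A_new.shape, X_new.shape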
class Unet(nn.Module):
def __init__(self, ks, in_dim, num_classes, num_queries):
super(Unet, self).__init__()
l_n = len(ks)
self.l_n = l_n
start_mlp = MLP(in_dim=in_dim)
start_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
self.add_module('start_mlp', start_mlp)
self.add_module('start_gcn', start_gcn)
for l in range(l_n):
down_mlp = MLP(in_dim=in_dim)
down_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
up_mlp = MLP(in_dim=in_dim)
up_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
pool = GraphPool(ks[l], in_dim=in_dim, num_classes=num_classes, num_queries=num_queries)
unpool = GraphUnpool()
self.add_module('down_mlp_{}'.format(l), down_mlp)
self.add_module('down_gcn_{}'.format(l), down_gcn)
self.add_module('up_mlp_{}'.format(l), up_mlp)
self.add_module('up_gcn_{}'.format(l), up_gcn)
self.add_module('pool_{}'.format(l), pool)
self.add_module('unpool_{}'.format(l), unpool)
bottom_mlp = MLP(in_dim=in_dim)
bottom_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
self.add_module('bottom_mlp', bottom_mlp)
self.add_module('bottom_gcn', bottom_gcn)
out_mlp = MLP(in_dim=in_dim * 2)
out_gcn = GCN(in_dim=in_dim * 2, out_dim=num_classes)
self.add_module('out_mlp', out_mlp)
self.add_module('out_gcn', out_gcn)
def forward(self, A_init, X):
adj_ms = []
indices_list = []
down_outs = []
A_old = A_init
# visual_X(1)
if tt.arg.visual == True:
batch = X.size(0)
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['Input_feature'] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
A_new = self._modules['start_mlp'](X)
X = self._modules['start_gcn'](A_new, A_old, X)
org_X = X
# visual_X(2)
if tt.arg.visual == True:
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['start_gcn'] + list(np_X)]
df = | pd.DataFrame(data) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
class Analysis:
def __init__(self, Class_name, subject_list, subject_full_score_list, subject_total_list, subject_total_score):
self.Class_name = Class_name
self.subject_list = subject_list
self.subject_full_score_list = subject_full_score_list
self.subject_total_list = subject_total_list
self.subject_total_score = subject_total_score
self.score_line = pd.read_excel("./OUTPUT/全区/2-2-2总分及各科上线的有效分数线.xlsx")
def sector_output(self):
Class_name = self.Class_name
subject_list = self.subject_list
subject_full_score_list = self.subject_full_score_list
subject_total_list = self.subject_total_list
subject_total_score = self.subject_total_score
output_dict = {}
total_score_analysis = self.total_score_analysis(Class_name, subject_total_score, subject_full_score_list)
output_dict["1-3-2总分得分情况"] = total_score_analysis
subject_score_analysis = self.subject_score_analysis(subject_list, subject_total_list,
subject_total_score, subject_full_score_list)
output_dict["1-3-1科目得分情况"] = subject_score_analysis
total_score_distribution_analysis = self.total_score_distribution_analysis(Class_name, subject_total_score,
subject_full_score_list)
output_dict["1-5-2总分分数段分布表"] = total_score_distribution_analysis
total_score_line_analysis = self.total_score_line_analysis(Class_name, subject_total_score)
output_dict["1-4总分上线人数(目标1-5)"] = total_score_line_analysis
score_line = self.score_line
output_dict["2-2-2总分及各科上线的有效分数线"] = score_line
return output_dict
def total_score_analysis(self, Class_name, subject_total_score, subject_full_score_list):
full_score = subject_full_score_list[-1]
score_dict = {"班级": Class_name, "最高分": [], "最低分": [], "中位数": [], "平均分": [], "标准差": [],
"满分率": [], "超优率": [], "优秀率": [], "良好率": [], "及格率": [], "待及格": [], "低分率": [],
"全距": []}
append_list = ["超优率", "优秀率", "良好率", "及格率"]
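        # The four bands in append_list map onto fractions of full marks computed in the interval
        # loop below: [90%,100%), [80%,90%), [70%,80%) and [60%,70%) of the full score.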
for Class in Class_name:
if Class == "全校":
this_Class = subject_total_score
else:
this_Class = subject_total_score.groupby("班级").get_group(Class)
student_score = this_Class['分数.7']
stu_num = len(student_score)
score_dict["最高分"].append(student_score.max())
score_dict["最低分"].append(student_score.min())
score_dict["中位数"].append(np.round(student_score.median(), 2))
score_dict["平均分"].append(np.round(student_score.mean(), 2))
score_dict["标准差"].append(np.round(student_score.std(), 2))
full_score_student = student_score.where(student_score == full_score).dropna()
full_score_stu_num = len(full_score_student)
full_score_rate = str(np.round(full_score_stu_num / stu_num * 100, 2))
full_score_rate = "{}%".format(full_score_rate)
score_dict["满分率"].append(full_score_rate)
for interval in range(10, 6, -1):
append_index = 10 - interval
append_name = append_list[append_index]
coefficient_high = interval / 10
coefficient_low = (interval - 1) / 10
this_rate_student = student_score.where(student_score < full_score * coefficient_high).dropna()
this_rate_student = this_rate_student.where(this_rate_student >= full_score * coefficient_low).dropna()
this_rate_stu_num = len(this_rate_student)
this_rate = str(np.round(this_rate_stu_num / stu_num * 100, 2))
this_rate = "{}%".format(this_rate)
score_dict[append_name].append(this_rate)
almost_pass_student = student_score.where(student_score < full_score * 0.6).dropna()
almost_pass_student = almost_pass_student.where(almost_pass_student >= full_score * 0.4).dropna()
almost_pass_stu_num = len(almost_pass_student)
almost_pass_rate = str(np.round(almost_pass_stu_num / stu_num * 100, 2))
almost_pass_rate = "{}%".format(almost_pass_rate)
score_dict["待及格"].append(almost_pass_rate)
low_student = student_score.where(student_score < full_score * 0.4).dropna()
low_student = low_student.where(low_student >= 0).dropna()
low_stu_num = len(low_student)
low_rate = str(np.round(low_stu_num / stu_num * 100, 2))
low_rate = "{}%".format(low_rate)
score_dict["低分率"].append(low_rate)
score_dict["全距"].append(np.round((student_score.max() - student_score.min()), 1))
score_df = pd.DataFrame(score_dict)
return score_df
def subject_score_analysis(self, subject_list, subject_total_list, subject_total_score, subject_full_score_list):
score_dict = {"学科": subject_list, "总分": subject_full_score_list, "最高分": [],
"最低分": [], "中位数": [], "平均分": [],
"标准差": [], "满分率": [], "超优率": [],
"优秀率": [], "良好率": [], "及格率": [],
"待及格": [], "低分率": [], "全距": []}
append_list = ["超优率", "优秀率", "良好率", "及格率"]
for index in range(8):
if index == 7:
this_subject = subject_total_score['分数.7']
else:
subject = subject_total_list[index]
this_subject = subject['全卷']
full_score = subject_full_score_list[index]
stu_num = len(this_subject)
score_dict["最高分"].append(this_subject.max())
score_dict["最低分"].append(this_subject.min())
score_dict["中位数"].append(np.round(this_subject.median(), 2))
score_dict["平均分"].append(np.round(this_subject.mean(), 2))
score_dict["标准差"].append(np.round(this_subject.std(), 2))
full_score_student = this_subject.where(this_subject == full_score).dropna()
full_score_stu_num = len(full_score_student)
full_score_rate = str(np.round(full_score_stu_num / stu_num * 100, 2))
full_score_rate = "{}%".format(full_score_rate)
score_dict["满分率"].append(full_score_rate)
for interval in range(10, 6, -1):
append_index = 10 - interval
append_name = append_list[append_index]
coefficient_high = interval / 10
coefficient_low = (interval - 1) / 10
this_rate_student = this_subject.where(this_subject < full_score * coefficient_high).dropna()
this_rate_student = this_rate_student.where(this_rate_student >= full_score * coefficient_low).dropna()
this_rate_stu_num = len(this_rate_student)
this_rate = str(np.round(this_rate_stu_num / stu_num * 100, 2))
this_rate = "{}%".format(this_rate)
score_dict[append_name].append(this_rate)
almost_pass_student = this_subject.where(this_subject < full_score * 0.6).dropna()
almost_pass_student = almost_pass_student.where(almost_pass_student >= full_score * 0.4).dropna()
almost_pass_stu_num = len(almost_pass_student)
almost_pass_rate = str(np.round(almost_pass_stu_num / stu_num * 100, 2))
almost_pass_rate = "{}%".format(almost_pass_rate)
score_dict["待及格"].append(almost_pass_rate)
low_student = this_subject.where(this_subject < full_score * 0.4).dropna()
low_student = low_student.where(low_student >= 0).dropna()
low_stu_num = len(low_student)
low_rate = str(np.round(low_stu_num / stu_num * 100, 2))
low_rate = "{}%".format(low_rate)
score_dict["低分率"].append(low_rate)
score_dict["全距"].append(np.round((this_subject.max() - this_subject.min()), 1))
score_df = pd.DataFrame(score_dict)
return score_df
def total_score_distribution_analysis(self, Class_name, subject_total_score, subject_full_score_list):
full_score = subject_full_score_list[-1]
analysis_dict = {"班级": Class_name, "【0,10%)": [], "【10%,20%)": [], "【20%,30%)": [],
"【30%,40%)": [], "【40%,50%)": [], "【50%,60%)": [],
"【60%,70%)": [], "【70%,80%)": [], "【80%,90%)": [],
"【90%-100%)": [], "人数": []}
append_list = ["【0,10%)", "【10%,20%)", "【20%,30%)", "【30%,40%)", "【40%,50%)", "【50%,60%)",
"【60%,70%)", "【70%,80%)", "【80%,90%)", "【90%-100%)"]
for Class in Class_name:
if Class == "全校":
this_Class = subject_total_score['分数.7']
else:
this_Class = subject_total_score.groupby('班级').get_group(Class)
this_Class = this_Class['分数.7']
stu_num = len(this_Class)
analysis_dict["人数"].append(stu_num)
for interval in range(0, 10):
append_name = append_list[interval]
high_boundary = full_score * (interval + 1) / 10
low_boundary = full_score * interval / 10
student = this_Class.where(this_Class < high_boundary).dropna()
student = student.where(this_Class >= low_boundary).dropna()
num = len(student)
analysis_dict[append_name].append(num)
analysis_df = | pd.DataFrame(analysis_dict) | pandas.DataFrame |
'''
Created on Sep 2, 2016
@author: Gully
'''
from __future__ import print_function, division
import argparse
import argparse_config
import codecs
import os
import numpy as np
import pandas as pd
import warnings
from sets import Set
import re
from bokeh.plotting import figure, show, save, output_notebook, output_file
from bokeh.models import ColumnDataSource, Range1d
#
# This function checks to see if there is a boundary condition between clause 1 and clause 2
# Returns a tuple: (True / False, Explanation)
#
def checkForStartBoundary(clause1, clause2, expt_codes, tsv, c_s_lookup, s_c_lookup):
row1 = tsv.loc[clause1]
row2 = tsv.loc[clause2]
# both clauses are in the same sentence => false
if( row1['SentenceId'] == row2['SentenceId'] ):
return (False,"Same sentence")
# clause 1 is a title paragraph => true
elif( "header" in row1['Codes'] ):
return (True, "?/header")
#
# clause 1 is in a sentence where
# (A) there are hypotheses/problems/facts
# (B) there are results/implications with exLinks present
# clause 2 is in a sentence where
# (A) there are goals/methods
# (B) there are results/implications with no exLinks
#
sentence1 = c_s_lookup[clause1]
sentence2 = c_s_lookup[clause2]
go_condition_2 = False
for cs2 in s_c_lookup[sentence2]:
disc2 = tsv.loc[cs2]['Discourse Type']
inExHead2 = tsv.loc[cs2]['Codes']
if( (disc2 == 'result' or disc2 == 'implication')
and "exLink" not in inExHead2):
go_condition_2 = True
elif( disc2 == 'goal' or disc2 == 'method'):
go_condition_2 = True
if( go_condition_2 ) :
for cs1 in s_c_lookup[sentence1]:
disc1 = tsv.loc[cs1]['Discourse Type']
inExHead1 = tsv.loc[cs1]['Codes']
if(disc1 == 'hypothesis' or disc1 == 'problem' or disc1 == 'fact'):
#print(tsv.loc[cs2])
return (True, "A:"+disc1+inExHead1+"/"+disc2+inExHead2)
elif((disc1 == 'result' or disc1 == 'implication') and "exLink" in inExHead1):
#print(tsv.loc[cs2])
return (True, "B:"+disc1+inExHead1+"/"+disc2+inExHead2)
es1 = row1['ExperimentValues']
if( es1 == es1 and len(set(expt_codes).intersection(es1.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es1 + "(1)")
es2 = row2['ExperimentValues']
if( es2 == es2 and len(set(expt_codes).intersection(es2.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es2 + "(2)")
return (False,"end")
#
# This function checks to see if there is a boundary condition between clause 1 and clause 2
# Returns a tuple: (True / False, Explanation)
#
def checkForEndBoundary(clause1, clause2, expt_codes, tsv, c_s_lookup, s_c_lookup):
row1 = tsv.loc[clause1]
row2 = tsv.loc[clause2]
# both clauses are in the same sentence => false
if( row1['SentenceId'] == row2['SentenceId'] ):
return (False,"Same sentence")
# clause 2 is a title paragraph => true
elif( "header" in row2['Codes'] ):
return (True, "?/header")
#
# clause 1 is in a sentence where there are results/implications with no exLinks and
# clause 2 is in a sentence where
# (A) there are goals/methods/hypotheses/problems/facts
# (B) there are results/implications with exLinks present
#
sentence1 = c_s_lookup[clause1]
sentence2 = c_s_lookup[clause2]
go_condition_1 = False
for cs1 in s_c_lookup[sentence1]:
disc1 = tsv.loc[cs1]['Discourse Type']
inExHead1 = tsv.loc[cs1]['Codes']
if( (disc1 == 'result' or disc1 == 'implication')
and "exLink" not in inExHead1):
go_condition_1 = True
if( go_condition_1 ) :
for cs2 in s_c_lookup[sentence2]:
disc2 = tsv.loc[cs2]['Discourse Type']
inExHead2 = tsv.loc[cs2]['Codes']
if(disc2 != 'result' and disc2 != 'implication'):
#print(tsv.loc[cs2])
return (True, "C"+disc1+inExHead1+"/"+disc2+inExHead2)
elif((disc2 == 'result' or disc2 == 'implication') and "exLink" in inExHead2):
#print(tsv.loc[cs2])
return (True, "D"+disc1+inExHead1+"/"+disc2+inExHead2)
es1 = row1['ExperimentValues']
if( es1 == es1 and len(set(expt_codes).intersection(es1.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es1 + "(1)")
es2 = row2['ExperimentValues']
if( es2 == es2 and len(set(expt_codes).intersection(es2.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es2 + "(2)")
return (False,"end")
def add_spans(tsv):
c_s_lookup = {}
c_p_lookup = {}
s_c_lookup = {}
p_c_lookup = {}
fig_ref_set = Set()
expt_code_set = Set()
clause_max = -1
clause_min = 1000
for i,row in tsv.iterrows():
es = row['ExperimentValues']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
#print("i: " + str(i))
#print("refs: " + str(es))
#print("~~~~~~~~~~~~~~~~~~")
s = int(sid[1:])
if(paragraph!=paragraph):
continue
p = 0
if( paragraph == '-'):
p = 0
elif( paragraph[0:1] == 'p'):
p = int(paragraph[1:])
elif( paragraph[0:5] == 'title'):
p = int(paragraph[5:])
c_s_lookup[i] = s
c_p_lookup[i] = p
if( s_c_lookup.get(s) is None ):
s_c_lookup[s] = [i]
else:
s_c_lookup.get(s).append(i)
if( p_c_lookup.get(p) is None ):
p_c_lookup[p] = [i]
else:
p_c_lookup.get(p).append(i)
if( heading != heading ):
heading = ""
if( re.match('^Result', heading) is None or floatingBox):
continue
if( i > clause_max):
clause_max = i
if( i < clause_min):
clause_min = i
if(es!=es):
continue
try:
codes = str(es).split('|')
except AttributeError:
print(str(es) + " is not a string. Skipping...")
continue
fig_ref_set.add(i)
for c in codes:
expt_code_set.add(c)
fig_refs = sorted(fig_ref_set)
fig_spans = {}
for i_fig in fig_refs:
row = tsv.loc[i_fig]
es = row['ExperimentValues']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
try:
expt_codes = str(es).split('|')
except AttributeError:
print(str(es) + " is not a string. Skipping...")
continue
# search backwards for a boundary condition between sentences
c1 = i_fig - 1
c2 = i_fig
while( checkForStartBoundary(c1, c2, expt_codes, tsv, c_s_lookup, s_c_lookup)[0] is False ):
c1 = c1-1
c2 = c2-1
expt_start = c2
# search forwards for a boundary condition between sentences
c1 = i_fig
c2 = i_fig + 1
while( checkForEndBoundary(c1, c2, expt_codes, tsv, c_s_lookup, s_c_lookup)[0] is False ):
c1 = c1+1
c2 = c2+1
expt_end = c1
for c in range(expt_start, expt_end+1):
if( fig_spans.get(c) is None ):
fig_spans[c] = set(expt_codes)
else:
fig_spans.get(c).update(set(expt_codes))
#print("Figure Location: " + str(i_fig) )
#print("Experiment Label: " + es )
#print("Expt Start: " + str(expt_start) )
#print("Expt Start Expl: " + str(checkForStartBoundary(expt_start-1, expt_start, expt_codes, tsv, c_s_lookup, s_c_lookup)) )
#print("Expt End: " + str(expt_end) )
#print("Expt End Expl: " + str(checkForEndBoundary(expt_end, expt_end+1, expt_codes, tsv, c_s_lookup, s_c_lookup)) )
#print( "~~~~~~~~~~~~~~~~~~~~" )
for i in fig_spans:
fig_spans[i] = "|".join(fig_spans.get(i))
#print(fig_spans[i])
tsv['fig_spans'] = pd.Series(fig_spans, index=fig_spans)
return tsv
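# Hypothetical usage sketch: the TSV is expected to provide the clause-level columns referenced
# above ('ExperimentValues', 'Discourse Type', 'Codes', 'SentenceId', 'Paragraph', 'Headings',
# 'FloatingBox?'); the file name is a placeholder. Example only, never invoked by this module.
def _example_add_spans(tsv_path='paper_clauses.tsv'):
    tsv = pd.read_csv(tsv_path, sep='\t')
    tsv = add_spans(tsv)
    return tsv[['SentenceId', 'ExperimentValues', 'fig_spans']]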
def prepare_and_draw_gannt(filename, title, tsv):
gantt_rows = []
gantt_rows2 = []
gantt_rows3 = []
dtypes = ["fact","hypothesis","problem","goal" ,"method","result","implication"]
colors = ["Snow" ,"Snow" ,"Snow" ,"LightGray","Gray" ,"LightBlue" ,"LightGreen"]
colors_s = | pd.Series(colors, index=dtypes) | pandas.Series |
# -*- coding:utf-8 -*-
from calendar import leapdays
import numpy as np
import pandas as pd
import random
from datetime import datetime, timedelta
import time
import re
import joblib
import requests
# from draw import *
import xlrd
from pre_train import model_predict
from unit import *
xlrd.xlsx.ensure_elementtree_imported(False, None)
xlrd.xlsx.Element_has_iter = True
base_path_1 = "./dataset/"
base_path_2 = "./dataset/tmp/"
base_path_3 = "./output/"
station_id_change = {
'miyunshuiku_aq': 'miyunshuik_aq',
'wanshouxigong_aq': 'wanshouxig_aq',
'nongzhanguan_aq': 'nongzhangu_aq',
'xizhimenbei_aq': 'xizhimenbe_aq',
'fengtaihuayuan_aq': 'fengtaihua_aq',
'aotizhongxin_aq': 'aotizhongx_aq',
'yongdingmennei_aq': 'yongdingme_aq'
}
# Download air-quality and meteorology data from the competition website
def get_data(city, start_time, end_time, current_day=False):
if current_day == True:
end_time = '2018-07-01-23'
link1 = 'https://biendata.com/competition/airquality/' + city + '/' + start_time + '/' + end_time + '/2k0d1d8'
respones = requests.get(link1)
if current_day == False:
with open(base_path_2 + city + "_airquality_" + start_time + "_" + end_time + ".csv", 'w') as f:
f.write(respones.text)
else:
with open(base_path_2 + city + "_airquality_current_day.csv", 'w') as f:
f.write(respones.text)
if city == "bj":
link2 = 'https://biendata.com/competition/meteorology/' + city + '/' + start_time + '/' + end_time + '/2k0d1d8'
respones = requests.get(link2)
if current_day == False:
with open(base_path_2 + city + "_meteorology_" + start_time + "_" + end_time + ".csv", 'w') as f:
f.write(respones.text)
else:
with open(base_path_2 + city + "_meteorology_current_day.csv", 'w') as f:
f.write(respones.text)
link3 = 'https://biendata.com/competition/meteorology/' + city + '_grid/' + start_time + '/' + end_time + '/2k0d1d8'
respones = requests.get(link3)
if current_day == False:
with open(base_path_2 + city + "_meteorology_grid_" + start_time + "_" + end_time + ".csv", 'w') as f:
f.write(respones.text)
else:
with open(base_path_2 + city + "_meteorology_grid_current_day.csv", 'w') as f:
f.write(respones.text)
# Load station metadata
def load_station():
filename = base_path_1 + "Beijing_AirQuality_Stations_cn.xlsx"
data = xlrd.open_workbook(filename)
table = data.sheet_by_name(u'Sheet2')
nrows = table.nrows
#print(nrows)
bj_stations = {}
for i in range(0, nrows):
row = table.row_values(i)
print (row)
bj_stations[row[0]] = {}
bj_stations[row[0]]['lng'] = row[1]
bj_stations[row[0]]['lat'] = row[2]
bj_stations[row[0]]['type_id'] = int(row[-1])
#print(int(row[-1]))
bj_stations[row[0]]['station_num_id'] = i
filename = base_path_1 + "London_AirQuality_Stations.csv"
fr = open(filename)
ld_stations = {}
flag = 0
i = 0
for line in fr.readlines():
if flag == 0:
flag = 1
continue
row = line.strip().split(",")
ld_stations[row[0]] = {}
if row[2] == "TRUE":
ld_stations[row[0]]['predict'] = True
else:
ld_stations[row[0]]['predict'] = False
ld_stations[row[0]]['lng'] = float(row[5])
ld_stations[row[0]]['lat'] = float(row[4])
ld_stations[row[0]]['type_id'] = int(row[-1])
ld_stations[row[0]]['station_num_id'] = i
i += 1
stations = {}
stations["bj"] = bj_stations
stations["ld"] = ld_stations
return stations
# Load the raw data
def load_data(city, start_time, end_time, current_day=False):
if current_day == False:
#filename = base_path_2 + city + "_airquality_" + start_time + "_" + end_time + ".csv"
filename = "C:/Users/Nobody/Documents/aau/6/jacob/KDD_CUP_2018-master/dataset/tmp/beijing_17_18_aq.csv"
else:
#filename = "C:/Users/Nobody/Documents/aau/6/jacob/KDD_CUP_2018-master/dataset/tmp/beijing_17_18_aq.csv"
filename = base_path_1 + city + "_aq_online.csv"
df = pd.read_csv(filename,low_memory=False, sep=',')
#df.rename(columns={'SO2': 'SO2_Concentration', 'NO2': 'NO2_Concentration',
# 'PM10': 'PM10_Concentration', 'PM2.5': 'PM25_Concentration',"SO2":"SO2_Concentration",
# 'utc_time': 'time', 'stationId': 'station_id'}, inplace=True)
# print df.size
if current_day == False:
if city == 'ld':
filename = base_path_1 + 'London_historical_aqi_forecast_stations_20180331.csv'
df1 = pd.read_csv(filename, sep=',')
df1.rename(columns={'SO2': 'SO2_Concentration', 'NO2 (ug/m3)': 'NO2_Concentration',
'PM10 (ug/m3)': 'PM10_Concentration', 'PM2.5 (ug/m3)': 'PM25_Concentration',
'MeasurementDateGMT': 'time', 'Station_ID': 'station_id'}, inplace=True)
df = pd.concat([df, df1])
filename = base_path_1 + 'London_historical_aqi_other_stations_20180331.csv'
df1 = pd.read_csv(filename, sep=',')
df1.rename(columns={'SO2': 'SO2_Concentration', 'NO2 (ug/m3)': 'NO2_Concentration',
'PM10 (ug/m3)': 'PM10_Concentration', 'PM2.5 (ug/m3)': 'PM25_Concentration',
'MeasurementDateGMT': 'time', 'Station_ID': 'station_id'}, inplace=True)
df = pd.concat([df, df1])
else:
filename = base_path_1 + 'beijing_17_18_aq.csv'
#filename = "C:/Users/Nobody/Documents/aau/6/jacob/KDD_CUP_2018-master/dataset/tmp/beijing_17_18_aq.csv"
df1 = pd.read_csv(filename, sep=',')
df1.rename(columns={'SO2': 'SO2_Concentration', 'O3': 'O3_Concentration', 'CO': 'CO_Concentration',
'NO2': 'NO2_Concentration', 'PM10': 'PM10_Concentration', 'PM2.5': 'PM25_Concentration',
'utc_time': 'time', 'stationId': 'station_id'}, inplace=True)
df = pd.concat([df, df1])
filename = base_path_1 + 'beijing_201802_201803_aq.csv'
df1 = pd.read_csv(filename, sep=',')
df1.rename(columns={'SO2': 'SO2_Concentration', 'O3': 'O3_Concentration', 'CO': 'CO_Concentration',
'NO2': 'NO2_Concentration', 'PM10': 'PM10_Concentration', 'PM2.5': 'PM25_Concentration',
'utc_time': 'time', 'stationId': 'station_id'}, inplace=True)
df = pd.concat([df, df1])
# print df.size
print("\n")
print(df)
df['time'] = pd.to_datetime(df['time'])
df.index = df['time']
df['time_week'] = df.index.map(lambda x: x.weekday)
df['time_year'] = df.index.map(lambda x: x.year)
df['time_month'] = df.index.map(lambda x: x.month)
df['time_day'] = df.index.map(lambda x: x.day)
df['time_hour'] = df.index.map(lambda x: x.hour)
print
print(df)
if city == "ld":
df = df[["station_id", "PM25_Concentration", "PM10_Concentration", "NO2_Concentration",
'time_year', 'time_month', 'time_week', 'time_day', 'time_hour']]
else:
df = df[["station_id", "PM25_Concentration", "PM10_Concentration", "O3_Concentration", "CO_Concentration",
"NO2_Concentration", "SO2_Concentration", 'time_year', 'time_month', 'time_week', 'time_day',
'time_hour']]
# print df
# df = df.dropna(axis=0)
# print df.size
# process_loss_data(df, city, stations, length = 24*3, pre_train_flag=pre_train_flag)
# df.to_csv(base_path_3 + city + '.csv', index=True, sep=',')
return df
# Load the processed data
def load_data_process(city, current_day=False):
if current_day == False:
filename = base_path_2 + city + "_airquality_processing.csv"
else:
filename = base_path_2 + city + "_current_day_processing.csv"
df = pd.read_csv(filename, sep=',')
df['time'] = pd.to_datetime(df['time'])
df.index = df['time']
return df
# Compute the similarity (root-mean-square difference) between two series
def cal_similar(df1, df2):
# print df1.mean(), df2.mean()
df3 = df1.sub(df2)
return np.sqrt(df3.mul(df3).mean())
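# cal_similar is the root-mean-square difference between two aligned series:
# sqrt(mean((df1 - df2)^2)). A quick sanity-check sketch with made-up values; example only,
# never invoked by this module.
def _example_cal_similar():
    a = pd.Series([10.0, 20.0, 30.0])
    b = pd.Series([12.0, 18.0, 33.0])
    # differences are [-2, 2, -3], so the result is sqrt((4 + 4 + 9) / 3), roughly 2.38
    return cal_similar(a, b)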
# Find the K nearest (most similar) stations
def KNN(station_group, attr, k=6):
tmp = {}
for station, group in station_group.items():
# print group.size
tmp[station] = group[attr]
neighborhood_k = {}
for station1 in tmp.keys():
dist = {}
print (station1, tmp[station1].mean())
for station2 in tmp.keys():
if station1 == station2:
continue
distance = cal_similar(tmp[station1], tmp[station2])
dist[station2] = distance
dist = sorted(dist.items(), key=lambda d: d[1])
print (dist[:k])
neighborhood_k[station1] = [x[0] for x in dist[:k] if x[1] / tmp[station1].mean() < 0.20]
return neighborhood_k
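# Hypothetical usage sketch: build the per-station groups from a processed dataframe and find,
# for each station, its most similar neighbours on the PM10 series. Example only, never invoked
# by this module.
def _example_knn(df):
    station_group = {name: g.sort_index() for name, g in df.groupby("station_id")}
    return KNN(station_group, attr="PM10_Concentration", k=6)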
# Fill gaps of only 1, 2, or 3 consecutive missing values
def between_two_point(station_group, attr_need):
num = 0
for station, group in station_group.items():
# print "group.values.shape: ", group.values.shape
values1 = group[attr_need].values
# print values1.shape
# print np.isnan(values1).sum()
for i in range(1, values1.shape[0] - 1):
for j in range(values1.shape[1]):
if np.isnan(values1[i, j]):
if not np.isnan(values1[i - 1, j]) and not np.isnan(values1[i + 1, j]):
values1[i, j] = (values1[i - 1, j] + values1[i + 1, j]) / 2
num += 1
continue
if i < 2:
continue
if not np.isnan(values1[i - 2, j]) and not np.isnan(values1[i + 1, j]):
values1[i, j] = (values1[i - 2, j] + values1[i + 1, j] * 2) / 3
values1[i - 1, j] = (values1[i - 2, j] * 2 + values1[i + 1, j]) / 3
num += 2
continue
if i >= values1.shape[0] - 2:
continue
if not np.isnan(values1[i - 1, j]) and not np.isnan(values1[i + 2, j]):
values1[i, j] = (values1[i - 1, j] * 2 + values1[i + 2, j]) / 3
values1[i + 1, j] = (values1[i - 1, j] + values1[i + 2, j] * 2) / 3
num += 2
continue
if not np.isnan(values1[i - 2, j]) and not np.isnan(values1[i + 2, j]):
values1[i - 1, j] = (values1[i - 2, j] * 3 + values1[i + 2, j]) / 4
values1[i, j] = (values1[i - 2, j] * 2 + values1[i + 2, j] * 2) / 4
values1[i + 1, j] = (values1[i - 2, j] + values1[i + 2, j] * 3) / 4
num += 3
continue
# print np.isnan(values1).sum()
# group[["PM25_Concentration", "PM10_Concentration", "O3_Concentration"]].values = values1
group.loc[:, attr_need] = values1
# print "group.values.shape: ", group.values.shape
print ("num: ", num)
# Use the pre-trained models to fill missing values
def pre_train(station_group, city, stations, attr_need, length, day=None):
model_file = base_path_2 + city + '_PM25_best.model'
reg_PM25 = joblib.load(model_file)
model_file = base_path_2 + city + '_PM10_best.model'
reg_PM10 = joblib.load(model_file)
nan_num = 0
total_error = 0.0
total_num = 0
if city == "bj":
model_file = base_path_2 + city + '_O3_best.model'
reg_O3 = joblib.load(model_file)
for station, group in station_group.items():
print(station)
if day is None:
values1 = group[attr_need].values
else:
values1 = group[day:][attr_need].values
print(values1.shape[0]- length)
for i in range(0, values1.shape[0] - length):
#print(i)
# print i
tmp = [stations[city][station]["type_id"], stations[city][station]["station_num_id"]]
tmp += list(values1[i + length, -2:])
if city == "bj":
values2 = values1[i: i + length, :3]
values2 = list(values2.T.flatten())
tmp += values2
values2 = values1[i + length, :3]
# values2 = list(values2.flatten())
# tmp += values2
else:
values2 = values1[i: i + length, :2]
values2 = list(values2.T.flatten())
tmp += values2
values2 = values1[i + length, :2]
# values2 = list(values2.flatten())
# tmp += values2
# print tmp
tmp = np.array(tmp)
if np.isnan(tmp).sum() > 0:
continue
ans_PM25 = model_predict(tmp, reg_PM25, city, stations, attribution="PM25")
ans_PM10 = model_predict(tmp, reg_PM10, city, stations, attribution="PM10")
# print ans_PM25, ans_PM10
if np.isnan(values2[0]) or values2[0] < 0:
values1[i + length, 0] = ans_PM25
nan_num += 1
else:
total_num += 1
total_error += np.abs(values1[i + length, 0] - ans_PM25) / (values1[i + length, 0] + ans_PM25) * 2
if np.isnan(values2[1]) or values2[1] < 0:
values1[i + length, 1] = ans_PM10
nan_num += 1
else:
total_num += 1
total_error += np.abs(values1[i + length, 1] - ans_PM10) / (values1[i + length, 1] + ans_PM10) * 2
if city == "bj":
ans_O3 = model_predict(tmp, reg_O3, city, stations, attribution="O3")
# print ans_O3
if np.isnan(values2[2]) or values2[2] < 0:
values1[i + length, 2] = ans_O3
nan_num += 1
else:
total_num += 1
total_error += np.abs(values1[i + length, 2] - ans_O3) / (values1[i + length, 2] + ans_O3) * 2
if day is None:
group.loc[:, attr_need] = values1
else:
group[day:].loc[:, attr_need] = values1
if total_num == 0:
total_error = 0.0
total_num = 1
print (nan_num, total_error / total_num)
# Build the pre-training dataset
def pre_train_data(station_group, stations, city, attr_need, length):
ans = []
for station, group in station_group.items():
values1 = group[attr_need].values
# print "length: ", length
# print "values1.shape", values1.shape
for i in range(0, values1.shape[0] - length):
# print i
#print("pre_train_data ",i)
#print(stations[city][station]["type_id"], stations[city][station]["station_num_id"])
tmp = [stations[city][station]["type_id"], stations[city][station]["station_num_id"]]
tmp += list(values1[i + length, -2:])
if city == "bj":
values2 = values1[i: i + length, :3]
values2 = list(values2.T.flatten())
tmp += values2
values2 = values1[i + length, :3]
values2 = list(values2.flatten())
tmp += values2
else:
values2 = values1[i: i + length, :2]
values2 = list(values2.T.flatten())
tmp += values2
values2 = values1[i + length, :2]
values2 = list(values2.flatten())
tmp += values2
# print tmp
tmp = np.array(tmp)
if np.isnan(tmp).sum() > 0:
continue
ans.append(tmp)
ans = np.array(ans)
print ("ans.shape", ans.shape)
np.savetxt(base_path_2 + city + '_training_pre.csv', ans, delimiter=',')
return ans
# Build the final training dataset
def train_data(station_group, stations, city, attr_need, length):
ans = []
for station, group in station_group.items():
values1 = group[attr_need].values
for i in range(0, values1.shape[0] - length + 1, 24):
# print i
tmp = [stations[city][station]["type_id"], stations[city][station]["station_num_id"]]
tmp += list(values1[i + length - 24, -4: -1])
tmp += list(values1[i + length - 48, -4: -1])
if city == "bj":
values2 = values1[i: i + length - 48, :3]
values2 = list(values2.T.flatten())
tmp += values2
values2 = values1[i + length - 48: i + length, :3]
values2 = list(values2.T.flatten())
tmp += values2
else:
values2 = values1[i: i + length - 48, :2]
values2 = list(values2.T.flatten())
tmp += values2
values2 = values1[i + length - 48:i + length, :2]
values2 = list(values2.T.flatten())
tmp += values2
# print tmp
tmp = np.array(tmp)
if np.isnan(tmp).sum() > 0:
continue
ans.append(tmp)
ans = np.array(ans)
np.savetxt(base_path_2 + city + '_training.csv', ans, delimiter=',')
return ans
# KF1 and KC1 are the same station; keep KF1 and fill it from KC1
def KF1_padding(station_group, attr_need):
KF1_values = station_group["KF1"][attr_need].values
KC1_values = station_group["KC1"][attr_need].values
print ("KF1_padding ",KF1_values.shape, KC1_values.shape)
for i in range(KC1_values.shape[0]):
for j in range(KC1_values.shape[1]):
if np.isnan(KF1_values[i, j]):
KF1_values[i, j] = KC1_values[i, j]
station_group["KF1"].loc[:, attr_need] = KF1_values
# Fill the first four days with the overall (station, weekday, hour) mean
def four_days(df, station_group, city, attr_need):
date_start = "2017-01-01"
group_ = df[date_start:].groupby(["station_id", 'time_week', 'time_hour'])
for station, group in station_group.items():
# print group["2017-01-01"]
values1 = group["2017-01-01":"2017-01-04"][attr_need].values
# print "1 ", values1
for i in range(values1.shape[0]):
if city == "bj":
values = group_.get_group((station, values1[i, -2], values1[i, -1]))[
["PM25_Concentration", "PM10_Concentration", "O3_Concentration"]].mean().values
if np.isnan(values1[i, 0]) or values1[i, 0] < 0:
values1[i, 0] = values[0]
if np.isnan(values1[i, 1]) or values1[i, 1] < 0:
values1[i, 1] = values[1]
if np.isnan(values1[i, 2]) or values1[i, 2] < 0:
values1[i, 2] = values[2]
else:
values = group_.get_group((station, values1[i, -2], values1[i, -1]))[
["PM25_Concentration", "PM10_Concentration"]].mean().values
# if np.isnan(values).sum() > 0:
# print "station:", station
if np.isnan(values1[i, 0]) or values1[i, 0] < 0:
values1[i, 0] = values[0]
if np.isnan(values1[i, 1]) or values1[i, 1] < 0:
values1[i, 1] = values[1]
# print group["2017-01-01"]
# print "2 ", values1
group["2017-01-01":"2017-01-04"].loc[:, attr_need] = values1
# Count days that are mostly NaN
def nan_data_static(station_group, city):
for station, group in station_group.items():
if city == "bj":
value1 = group[["PM25_Concentration", "PM10_Concentration", "O3_Concentration"]].values
pass
# for
# Handle missing data
def process_loss_data(df, city, stations, length=24 * 3, pre_train_flag=True):
ans_df = df.sort_index()
group = ans_df.groupby("station_id")
station_group = {}
for name, g in group:
station_group[name] = g.sort_index()
# print station_group[name]
if city == 'bj':
attr_need = ["PM25_Concentration", "PM10_Concentration", "O3_Concentration", 'time_year',
'time_month', 'time_day', 'time_week', 'time_hour']
attr_need2 = ["PM25_Concentration", "PM10_Concentration", "O3_Concentration",
"CO_Concentration", "NO2_Concentration", "SO2_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
else:
attr_need = ["PM25_Concentration", "PM10_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
attr_need2 = ["PM25_Concentration", "PM10_Concentration",
"NO2_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
pass
if city == "ld":
        # Fill KF1 from KC1 (same station)
KF1_padding(station_group, attr_need)
pass
between_two_point(station_group, attr_need)
# neighborhood_k = KNN(station_group, attr="PM10_Concentration")
# print neighborhood_k
# for station in neighborhood_k.keys():
# neighborhood = neighborhood_k[station]
# if len(neighborhood)==0:
# continue
    if pre_train_flag:
print("test",station_group)
ans = pre_train_data(station_group, stations, city, attr_need, length)
else:
print("before four days")
four_days(ans_df, station_group, city, attr_need)
print("else in process_loss_data",station_group)
pre_train(station_group, city, stations, attr_need, length)
print("before train_data")
ans = train_data(station_group, stations, city, attr_need, length=24 * 5)
import pickle
f1 = open(base_path_3 + city + '_data_processing.pkl', 'wb')
pickle.dump(station_group, f1, True)
return ans
# # Plotting analysis
# def analysis(df, stations, city):
# day = "2018-01-01"
# num = 800
# # draw_single_station_day(df, city, stations, start_day=day, num=num)
# draw_single_station(df, city, stations, start_day=day)
# Fetch the requested time range from the processed historical data
def history_data(city, stations, start_day="2017-01-01", end_day="2018-04-10"):
import pickle
f1 = open(base_path_3 + city + '_data_processing.pkl', 'rb')
# f1 = file(base_path_3 + city + '_data_history_KNN.pkl', 'rb')
#station_group = pickle.load(f1)
station_group = pd.read_pickle(f1)
city_station = stations[city]
ans = {}
for station, group in station_group.items():
group = group[start_day: end_day]
# print group["station_id"]
# group["station_id"] = np.array([station]*group.values.shape[0])
if city == "ld":
# if city_station[station]["predict"] == False:
# continue
values = group[
["station_id", 'PM25_Concentration', 'PM10_Concentration', 'NO2_Concentration', 'time_year',
'time_month', 'time_week', 'time_day', 'time_hour']] # .values
else:
values = group[
["station_id", 'PM25_Concentration', 'PM10_Concentration', 'O3_Concentration', 'CO_Concentration',
'NO2_Concentration', 'SO2_Concentration', 'time_year', 'time_month', 'time_week', 'time_day',
'time_hour']] # .values
ans[station] = values
# print values
return ans
def history_data_1(city, stations, start_day="2017-01-01", end_day="2018-04-10"):
import pickle
# f1 = file(base_path_3 + city + '_data_processing.pkl', 'rb')
f1 = open(base_path_3 + city + '_data_history_KNN.pkl', 'rb')
station_group = pickle.load(f1)
city_station = stations[city]
ans = {}
for station, group in station_group.items():
group = group[start_day: end_day]
# print group["station_id"]
# group["station_id"] = np.array([station]*group.values.shape[0])
if city == "ld":
# if city_station[station]["predict"] == False:
# continue
values = group[
["station_id", 'PM25_Concentration', 'PM10_Concentration', 'NO2_Concentration', 'time_year',
'time_month', 'time_week', 'time_day', 'time_hour']] # .values
else:
values = group[
["station_id", 'PM25_Concentration', 'PM10_Concentration', 'O3_Concentration', 'CO_Concentration',
'NO2_Concentration', 'SO2_Concentration', 'time_year', 'time_month', 'time_week', 'time_day',
'time_hour']] # .values
ans[station] = values
# print values
return ans
'''
The processed data are saved locally:
most recent data
base_path_3 + city + '_data_post.pkl'
historical data
base_path_3 + city + '_data_history.pkl'
latest (current-day) data
base_path_2 + city + "_current_day_processing.csv"
'''
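# A minimal, hypothetical sketch of reading the artifacts listed above back in
# (assumes base_path_2 / base_path_3 are defined as in the rest of this module):
# import pickle
# with open(base_path_3 + 'bj' + '_data_post.pkl', 'rb') as f:
#     bj_post = pickle.load(f)            # dict: station id -> DataFrame of recent data
# bj_current = pd.read_csv(base_path_2 + 'bj' + "_current_day_processing.csv")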
def post_data(city="bj"):
stations = load_station()
ans_post = history_data(city=city, stations=stations, start_day="2018-04-01", end_day="2018-04-10")
import pickle
f1 = open(base_path_3 + city + '_data_post.pkl', 'wb')
# print ans_post
pickle.dump(ans_post, f1, True)
ans_history = history_data(city=city, stations=stations, start_day="2018-03-01", end_day="2018-03-31")
f2 = open(base_path_3 + city + '_data_history.pkl', 'wb')
pickle.dump(ans_history, f2, True)
# time_now = datetime.now()
# time_now = time_now - timedelta(hours=32)
# time_now = time_now.strftime('%Y-%m-%d')
# start_time = str(time_now) + "-0"
#
# time_now = datetime.now()
# time_now = time_now - timedelta(hours=8)
# time_now = time_now.strftime('%Y-%m-%d')
# end_time = str(time_now) + "-23"
start_time = "2018-04-11-0"
end_time = "2018-04-15-23"
# get_data(start_time=start_time, end_time=end_time, city=city, current_day=True)
df = load_data(city=city, start_time=start_time, end_time=end_time, current_day=True)
filename = base_path_2 + city + "_current_day_processing.csv"
start_time_1 = "2018-04-11" + " 00:00:00"
end_time_1 = "2018-04-17" + " 23:00:00"
write_to_process(df, start_time=start_time_1, end_time=end_time_1, filename=filename)
# Fetch all the data needed for prediction
def get_all_processing_data_1(city, start_day, end_day, down_load=False):
stations = load_station()
import pickle
f1 = open(base_path_3 + city + '_data_post.pkl', 'rb')
data_post = pickle.load(f1)
start_time = start_day + "-0"
end_time = end_day + "-23"
three_day_before_start_day = datetime_toString(string_toDatetime(start_day) - timedelta(days=3))
one_day_before_end_day = datetime_toString(string_toDatetime(end_day) - timedelta(days=1))
one_day_before_start_day = datetime_toString(string_toDatetime(start_day) - timedelta(days=1))
if down_load:
get_data(start_time=start_time, end_time=end_time, city=city, current_day=True)
df = load_data(city=city, start_time=start_time, end_time=end_time, current_day=True)
filename = base_path_2 + city + "_current_day_processing.csv"
start_time_1 = start_day + " 00:00:00"
end_time_1 = datetime_toString(string_toDatetime(end_day) + timedelta(days=2)) + " 23:00:00"
write_to_process(df, start_time=start_time_1, end_time=end_time_1, filename=filename)
data_current = load_data_process(city=city, current_day=True)
current_group = data_current.groupby("station_id")
data_current = {}
for station, group in current_group:
data_current[station] = group
# print data_post, data_history, data_current
station_group = {}
for station in data_post.keys():
if city == "ld":
if stations[city][station]["predict"] == False:
continue
station_group[station] = pd.concat([data_post[station][:one_day_before_start_day], data_current[station]],
axis=0).sort_index() # data_history[station],
print (data_post[station][:one_day_before_start_day])
print (station_group[station].values.shape, one_day_before_start_day)
# print station_group[station]
if city == 'bj':
attr_need = ["PM25_Concentration", "PM10_Concentration", "O3_Concentration", 'time_year',
'time_month', 'time_day', 'time_week', 'time_hour']
else:
attr_need = ["PM25_Concentration", "PM10_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
between_two_point(station_group, attr_need)
pre_train(station_group, city, stations, attr_need, length=24 * 3, day=three_day_before_start_day)
ans_post_1 = {}
for station, group in station_group.items():
ans_post_1[station] = group[:one_day_before_end_day]
f1 = open(base_path_3 + city + '_data_post.pkl', 'wb')
pickle.dump(ans_post_1, f1, True)
return station_group
# Fetch all the data needed for prediction
def get_all_processing_data(city, start_day, end_day, down_load=False):
stations = load_station()
import pickle
f1 = open(base_path_3 + city + '_data_post.pkl', 'rb')
data_post = pd.read_pickle(f1)
#data_post = pickle.load(f1)
list_data_post = list(data_post.keys())
print(data_post.keys())
#print(data_post.keys()[1])
max_post_day = datetime_toString(data_post[list_data_post[0]]['time'].max() - timedelta(hours=23))
# print df2
one_day_after_max_post_day = datetime_toString(string_toDatetime(max_post_day) + timedelta(days=1))
# data_post = data_post
start_time = start_day + "-0"
end_time = end_day + "-23"
three_day_before_start_day = datetime_toString(string_toDatetime(start_day) - timedelta(days=3))
one_day_before_end_day = datetime_toString(string_toDatetime(end_day) - timedelta(days=1))
# one_day_before_start_day = datetime_toString(string_toDatetime(start_day)-timedelta(days=1))
if down_load:
get_data(start_time=start_time, end_time=end_time, city=city, current_day=True)
df = load_data(city=city, start_time=start_time, end_time=end_time, current_day=True)
filename = base_path_2 + city + "_current_day_processing.csv"
start_time_1 = start_day + " 00:00:00"
end_time_1 = datetime_toString(string_toDatetime(end_day) + timedelta(days=2)) + " 23:00:00"
write_to_process(df, start_time=start_time_1, end_time=end_time_1, filename=filename)
data_current = load_data_process(city=city, current_day=True)
current_group = data_current.groupby("station_id")
data_current = {}
for station, group in current_group:
data_current[station] = group
# print data_post, data_history, data_current
station_group = {}
for station in data_post.keys():
if city == "ld":
if stations[city][station]["predict"] == False:
continue
station_group[station] = pd.concat(
[data_post[station][: max_post_day], data_current[station][one_day_after_max_post_day:]],
axis=0).sort_index() # data_history[station],
# print data_post[station][:max_post_day]
# print station_group[station].values.shape,
# print max_post_day, one_day_after_max_post_day
# print station_group[station]
if city == 'bj':
attr_need = ["PM25_Concentration", "PM10_Concentration", "O3_Concentration", 'time_year',
'time_month', 'time_day', 'time_week', 'time_hour']
else:
attr_need = ["PM25_Concentration", "PM10_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
between_two_point(station_group, attr_need)
if string_toDatetime(max_post_day) >= string_toDatetime(end_day):
pass
else:
pre_train(station_group, city, stations, attr_need, length=24 * 3, day=three_day_before_start_day)
ans_post_1 = {}
for station, group in station_group.items():
if string_toDatetime(max_post_day) <= string_toDatetime(one_day_before_end_day):
ans_post_1[station] = group[:one_day_before_end_day].drop_duplicates()
else:
ans_post_1[station] = group[:max_post_day].drop_duplicates()
f1 = open(base_path_3 + city + '_data_post.pkl', 'wb')
pickle.dump(ans_post_1, f1, True)
ans_post_2 = {}
for station, group in station_group.items():
ans_post_2[station] = group[
:datetime_toString(string_toDatetime(end_day) + timedelta(days=2))].drop_duplicates()
return ans_post_2
def model_1(city):
stations = load_station()
import pickle
f1 = open(base_path_3 + city + '_data_post.pkl', 'rb')
data_post = pickle.load(f1)
f2 = open(base_path_3 + city + '_data_history.pkl', 'rb')
data_history = pickle.load(f2)
# filename3 = base_path_2 + city + "_current_day_processing.csv"
data_current = load_data_process(city=city, current_day=True)
current_group = data_current.groupby("station_id")
data_current = {}
for station, group in current_group:
data_current[station] = group
# print data_post, data_history, data_current
station_group = {}
for station in data_history.keys():
if city == "ld":
if stations[city][station]["predict"] == False:
continue
# print data_current[station]
station_group[station] = pd.concat([data_post[station], data_current[station]],
axis=0).sort_index() # data_history[station],
print (station_group[station])
if city == 'bj':
attr_need = ["PM25_Concentration", "PM10_Concentration", "O3_Concentration", 'time_year',
'time_month', 'time_day', 'time_week', 'time_hour']
attr_need2 = ["PM25_Concentration", "PM10_Concentration", "O3_Concentration",
"CO_Concentration", "NO2_Concentration", "SO2_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
else:
attr_need = ["PM25_Concentration", "PM10_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
attr_need2 = ["PM25_Concentration", "PM10_Concentration",
"NO2_Concentration", 'time_year', 'time_month',
'time_day', 'time_week', 'time_hour']
between_two_point(station_group, attr_need)
pre_train(station_group, city, stations, attr_need, length=24 * 3)
# print station_group
tmp = ""
for station, group in station_group.items():
        if station in station_id_change:
station = station_id_change[station]
values = group[attr_need].values
if city == 'bj':
values = values[-48:, :3]
for i in range(values.shape[0]):
tmp += station + "#" + str(i) + "," + str(values[i, 0]) + "," + str(values[i, 1]) + "," + str(
values[i, 2]) + "\n"
else:
values = values[-48:, :2]
for i in range(values.shape[0]):
tmp += station + "#" + str(i) + "," + str(values[i, 0]) + "," + str(values[i, 1]) + ",0.0\n"
return tmp
# Entry point for missing-value imputation
def loss_data_process_main(pre_train_flag=True):
stations = load_station()
city = 'bj'
df = load_data_process(city=city, current_day=False)
process_loss_data(df, city, stations, length=24 * 3, pre_train_flag=pre_train_flag)
# analysis(df, stations, city)
city = 'ld'
df = load_data_process(city=city, current_day=False)
process_loss_data(df, city, stations, length=24 * 3, pre_train_flag=pre_train_flag)
# analysis(df, stations, city)
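# A hypothetical invocation sketch of this entry point (the flag values are the
# only inputs; the actual driver script is not shown in this file):
# loss_data_process_main(pre_train_flag=True)   # first pass: build the pre-training set
# loss_data_process_main(pre_train_flag=False)  # second pass: impute and build the final training set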
'''
The processed results are stored to:
filename = base_path_2 + city + "_airquality_processing.csv"
filename = base_path_2 + city + "_current_day_processing.csv"
'''
def write_to_process(df, start_time="2017-01-01 00:00:00", end_time="2018-04-10 23:00:00",
filename=base_path_2 + "bj_airquality_processing.csv"):
df = df.drop_duplicates(["station_id", 'time_year', "time_month", "time_day", "time_hour"])
start_day = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
end_day = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
dates = pd.date_range(start_day, end_day, freq='60min')
df1 = | pd.DataFrame(index=dates) | pandas.DataFrame |
"""
Name : c7_25_generateffcMonthly_pkl.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import pandas as pd
ff=pd.read_pickle("c:/temp/ffMonthly.pkl")
print(ff.head(2))
mom= | pd.read_pickle("c:/temp/ffMomMonthly.pkl") | pandas.read_pickle |
import pandas as pd
import numpy as np
index = pd.date_range('1/1/2000', periods=8)
s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
df = pd.DataFrame(np.random.randn(8, 3),
index=index,
columns=['A', 'B', 'C'])
wp = pd.Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis= | pd.date_range('1/1/2000', periods=5) | pandas.date_range |
import json
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def plot_WDT_usages_vs_PD_usages():
# gather the data
information_path = "data/WDT_vs_PS_usage.json"
csv_ready_dict = {}
csv_ready_dict["timeframe"] = []
csv_ready_dict["value"] = []
csv_ready_dict["type"] = []
with open(information_path, "r") as information_data:
information_dict = json.load(information_data)
# add the Total Queries
csv_ready_dict["timeframe"].append("total")
csv_ready_dict["value"].append(information_dict["TOTAL queries"])
csv_ready_dict["type"].append("Total Queries")
# add the Total Usages
csv_ready_dict["timeframe"].append("total")
csv_ready_dict["value"].append(information_dict["TOTAL usages"])
csv_ready_dict["type"].append("Total Usages")
# add the wdt Usages
csv_ready_dict["timeframe"].append("total")
csv_ready_dict["value"].append(information_dict["WDT usages"])
csv_ready_dict["type"].append("wdt: Usages")
# add the ps Usages
csv_ready_dict["timeframe"].append("total")
csv_ready_dict["value"].append(information_dict["PS usages"])
csv_ready_dict["type"].append("ps: Usages")
for timeframe in information_dict["timeframe data"]:
# add the Total Queries
csv_ready_dict["timeframe"].append(timeframe)
csv_ready_dict["value"].append(information_dict["timeframe data"][timeframe]["TOTAL queries"])
csv_ready_dict["type"].append("Total Queries")
# add the Total Usages
csv_ready_dict["timeframe"].append(timeframe)
csv_ready_dict["value"].append(information_dict["timeframe data"][timeframe]["TOTAL usages"])
csv_ready_dict["type"].append("Total Usages")
# add the wdt Usages
csv_ready_dict["timeframe"].append(timeframe)
csv_ready_dict["value"].append(information_dict["timeframe data"][timeframe]["WDT usages"])
csv_ready_dict["type"].append("wdt: Usages")
# add the ps Usages
csv_ready_dict["timeframe"].append(timeframe)
csv_ready_dict["value"].append(information_dict["timeframe data"][timeframe]["PS usages"])
csv_ready_dict["type"].append("ps: Usages")
# plot the data
df = | pd.DataFrame(csv_ready_dict) | pandas.DataFrame |
''' Computes probabilities for HPC model '''
# downloading model for transfer learning
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input
from sklearn.svm import *
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn import metrics
import pandas as pd
import numpy as np
import argparse
#################################
import keras
from keras import backend as K
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, model_from_json
from keras.optimizers import SGD, RMSprop, Adam, Adagrad, Adadelta
from keras.applications import VGG16
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Conv2D, MaxPool2D, MaxPooling2D
from sklearn.metrics import auc, roc_curve
from sklearn.metrics import confusion_matrix
# from sklearn.externals import joblib
from sklearn.metrics import classification_report
from keras.models import Model
#####################################
# ----------------
META_AVG = 'avg'
META_STD = 'std'
# ----------------
def get_full_rbf_svm_clf(train_x, train_y, c_range=None, gamma_range=None):
param_grid = dict(gamma=gamma_range, C=c_range)
cv = StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(cache_size=1024),
param_grid=param_grid, cv=cv, n_jobs=14, verbose=10)
grid.fit(train_x, train_y)
print("The best parameters are %s with a score of %0.2f" %
(grid.best_params_, grid.best_score_))
scores = grid.cv_results_['mean_test_score'].reshape(
len(c_range), len(gamma_range))
print("Scores:")
print(scores)
print("c_range:", c_range)
print("gamma_range:", gamma_range)
c_best = grid.best_params_['C']
gamma_best = grid.best_params_['gamma']
clf = SVC(C=c_best, gamma=gamma_best, verbose=True)
return clf
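# A hypothetical usage sketch (the search ranges below are illustrative assumptions,
# not values taken from this script):
# c_range = np.logspace(-2, 3, 6)
# gamma_range = np.logspace(-4, 1, 6)
# clf = get_full_rbf_svm_clf(train_x, train_y, c_range=c_range, gamma_range=gamma_range)
# clf.fit(train_x, train_y)  # the returned SVC carries the best C/gamma but is not yet fitted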
# ----------------
def prep(data):
data[feats] = (data[feats] - meta[META_AVG]) / meta[META_STD] # normalize
data.fillna(0, inplace=True) # impute NaNs with mean=0
if '_count' in data.columns:
data.drop('_count', axis=1, inplace=True)
data_x = data.iloc[:, 0:-1].astype('float32').values
data_y = data.iloc[:, -1].astype('int32').values
return data_x, data_y
# ----------------
parser = argparse.ArgumentParser()
parser.add_argument('-dataset', required=True, help="dataset name")
parser.add_argument('-svmgamma', type=float, help="SVM gamma parameter")
parser.add_argument('-svmc', type=float, help="SVM C parameter")
args = parser.parse_args()
DATASET = args.dataset
DATA_FILE = '../data/' + DATASET + '-train'
VAL_FILE = '../data/' + DATASET + '-val'
TEST_FILE = '../data/' + DATASET + '-test'
META_FILE = '../data/' + DATASET + '-meta'
HPC_FILE = '../data/' + DATASET + '-hpc'
print("Using dataset", DATASET)
# ----------------
data_train = pd.read_pickle(DATA_FILE)
data_val = | pd.read_pickle(VAL_FILE) | pandas.read_pickle |
import logging
import sys, os
from src.evaluation.logger_config import init_logging
sys.path.append(os.path.expanduser('~/Projects/multivar-ts-ano/'))
import pickle
import traceback
import copy
import time
from typing import List
import json
import numpy as np
import pandas as pd
from src.algorithms.algorithm_utils import save_torch_algo
from src.datasets import MultiEntityDataset
from src.evaluation.evaluation_utils import fit_distributions, get_scores_channelwise
class Trainer:
def __init__(self, ds_class, algo_class, algo_config_base: dict, output_dir: str, ds_seed: int=42,
ds_kwargs: dict=None, algo_seeds: List[int]=[42], runs_per_seed: int=1, logger=None):
"""
1 trainer per MultiEntityDataset.
:param ds_class: Just the class without arguments. eg. Swat
:param algo_class: eg. AutoEncoder.
        :param ds_seed: seed passed to the MultiEntityDataset
        :param ds_kwargs: dictionary of arguments for dataset, aside from seed. eg: {"shorten_long": False} for swat
:param algo_config_base: base configuration.
:param output_dir: results directory where results will be saved in a predecided folder structure.
:param algo_seeds: this will be passed to the algo along with algo_config_base. If seed is provided in algo_
config_base, this value will take precedence.
:param runs_per_seed: for doing multiple runs per seed. In general, this will be 1.
"""
self.ds_class = ds_class
self.algo_class = algo_class
self.ds_seed = ds_seed
self.ds_kwargs = ds_kwargs
self.algo_config_base = algo_config_base
self.train_summary = []
self.output_dir = output_dir
self.seeds = algo_seeds
self.runs_per_seed = runs_per_seed
self.ds_multi = MultiEntityDataset(dataset_class=ds_class, seed=ds_seed, ds_kwargs=ds_kwargs)
self.ds_multi_name = self.ds_multi.name
self.algo_name = self.algo_class(**self.algo_config_base).name
self.algo_dir = os.path.join(self.output_dir, self.ds_multi_name, self.algo_name)
os.makedirs(self.algo_dir, exist_ok=True)
if logger is None:
init_logging(os.path.join(self.output_dir, 'logs'), prefix="trainer")
self.logger = logging.getLogger(__name__)
else:
self.logger = logger
@staticmethod
def timestamp():
return time.strftime('%Y-%m-%d-%H%M%S')
@staticmethod
def predict(algo, entity, entity_dir, logger):
X_train, _, X_test, _ = entity.data()
try:
algo.batch_size = 4*algo.batch_size
        except Exception:
logger.warning("couldn't increase batch_size for predict")
try:
logger.info("predicting train")
train_predictions = algo.predict(X_train.copy())
except Exception as e:
logger.error(f"encountered exception {e} while predicting train. Will fill with zeros")
logger.error(traceback.format_exc())
train_predictions = {'score_t': np.zeros(X_train.shape[0]),
'score_tc': None,
'error_t': None,
'error_tc': None,
'recons_tc': None,
}
try:
logger.info("predicting test")
test_predictions = algo.predict(X_test.copy())
except Exception as e:
logger.error(
f"encountered exception {e} while predicting test without starts argument. Will fill with zeros")
logger.error(traceback.format_exc())
test_predictions = {'score_t': np.zeros(X_test.shape[0]),
'score_tc': None,
'error_t': None,
'error_tc': None,
'recons_tc': None,
}
if algo.name == 'OmniAnoAlgo':
algo.close_session()
# Put train and test predictions in the same dictionary
predictions_dic = {}
for key, value in train_predictions.items():
predictions_dic[key + "_train"] = value
for key, value in test_predictions.items():
predictions_dic[key + "_test"] = value
predictions_dic["val_recons_err"] = algo.get_val_err()
predictions_dic["val_loss"] = algo.get_val_loss()
# When raw errors are available, fit univar gaussians to them to obtain channelwise and time wise anomaly scores
if predictions_dic["error_tc_test"] is not None and predictions_dic["score_tc_test"] is None and \
predictions_dic["score_t_test"] is None:
# go from error_tc to score_tc and score_t using utils functions
distr_names = ["univar_gaussian"]
distr_par_file = os.path.join(entity_dir, "distr_parameters")
distr_params = fit_distributions(distr_par_file, distr_names, predictions_dic=
{"train_raw_scores": predictions_dic["error_tc_train"]})[distr_names[0]]
score_t_train, _, score_t_test, score_tc_train, _, score_tc_test = \
get_scores_channelwise(distr_params, train_raw_scores=predictions_dic["error_tc_train"],
val_raw_scores=None, test_raw_scores=predictions_dic["error_tc_test"],
drop_set=set([]), logcdf=True)
predictions_dic["score_t_train"] = score_t_train
predictions_dic["score_tc_train"] = score_tc_train
predictions_dic["score_t_test"] = score_t_test
predictions_dic["score_tc_test"] = score_tc_test
with open(os.path.join(entity_dir, "raw_predictions"), "wb") as file:
pickle.dump(predictions_dic, file)
print("Saved predictions_dic")
return predictions_dic
def train_predict(self, algo_config=None, config_name="config"):
"""
:param algo_config:
:param config_name:
:return:
"""
if algo_config is None:
algo_config = self.algo_config_base
    if config_name == "config":
config_name = "base-config"
config_name = config_name.replace("_", "-") + "_" + self.timestamp()
config_dir = os.path.join(self.algo_dir, config_name)
os.makedirs(config_dir, exist_ok=True)
with open(os.path.join(config_dir, 'config.json'), 'w') as file:
json.dump(algo_config, file)
for seed in self.seeds:
algo_config["seed"] = seed
for run_num in range(self.runs_per_seed):
run_dir = os.path.join(config_dir, str(seed) + "-run" + str(run_num) + "_" + self.timestamp())
os.makedirs(run_dir)
self.logger.info("Will train models for {} entities".format(self.ds_multi.num_entities))
for entity in self.ds_multi.datasets:
entity_dir = os.path.join(run_dir, entity.name)
os.makedirs(entity_dir)
# if self.algo_class == TelemanomAlgo:
# algo_config["entity_id"] = entity.name.split("-", 1)[-1]
algo = self.algo_class(**algo_config)
algo.set_output_dir(entity_dir)
self.logger.info("Training algo {} on entity {} of me_dataset {} with config {}, algo seed {}, "
"run_num {}".format(
algo.name, entity.name, self.ds_multi_name, config_name, str(seed), str(run_num)
))
X_train, y_train, X_test, y_test = entity.data()
try:
algo.fit(X_train.copy())
train_summary = [config_name, algo_config, algo.get_val_loss()]
self.train_summary.append(train_summary)
self.logger.info("config {} : {}, val loss {}".format(*train_summary))
except Exception as e:
self.logger.error(f"encountered exception {e} while training or saving loss")
self.logger.error(traceback.format_exc())
continue
if algo.torch_save:
try:
save_torch_algo(algo, out_dir=entity_dir)
except Exception as e:
self.logger.error(f"encountered exception {e} while saving model")
self.logger.error(traceback.format_exc())
try:
self.predict(algo=algo, entity=entity, entity_dir=entity_dir, logger=self.logger)
except Exception as e:
self.logger.error(f"encountered exception {e} while running predictions")
self.logger.error(traceback.format_exc())
continue
def train_modified_configs(self, configs: dict):
"""
:param configs: dict of configs. The key is the config name. The value is a dict that must specify a valid value
of an input parameter for the algo. Values not specified will be the ones in the self.algo_config_base, and not
the default value specified by algo
:return:
"""
for config_name, config in configs.items():
merged_config = copy.deepcopy(self.algo_config_base)
for key, value in config.items():
merged_config[key] = value
self.train_predict(algo_config=merged_config, config_name=config_name)
def get_best_config(self):
best_config_name, best_config, best_config_loss = None, None, None
if len(self.train_summary) == 0:
self.logger.error(f"Train summary not found. Maybe training hasn't been run yet. Call this function after"
f"training is done.")
if len(self.train_summary) == 1:
best_config_name, best_config, best_config_loss = self.train_summary[0]
self.train_summary = | pd.DataFrame(self.train_summary, columns=["config name", "config", "val_loss"]) | pandas.DataFrame |
import pandas as pd
from pandas_datareader import data as pd_data
import numpy as np
import matplotlib.pyplot as plt
p = print
google = pd.read_csv('data/goog.csv', index_col='Date', parse_dates=True)
def data_intro():
p(pd.__version__)
p(google)
start = pd.Timestamp('2010-1-1')
end = pd.Timestamp('2014-12-31')
p(type(google))
p(google.head())
p(google.info())
p(google['Open'])
p(google['Open'].iloc[0: 5])
p(google.loc['2010-01-04':'2010-01-08', 'Open'])
google_up = google[google['Close'] > google['Open']]
p(google_up.head())
google_filtered = google[pd.isnull(google['Volume']) == False]
print(google_filtered.head())
google.describe()
def data_computations():
google['Return'] = google['Close'].pct_change()
p(google['Return'].iloc[0:5])
google['LogReturn'] = np.log(1 + google['Return'])
p(google['LogReturn'].iloc[0: 5])
window_size = 252
google['Volatility'] = google['LogReturn'].rolling(window_size).std() * np.sqrt(window_size)
p(google['Volatility'].iloc[window_size - 5: window_size + 5])
p(google.info())
google[['Close', 'Volatility']].plot(subplots=True, figsize=(15, 6))
def data_structures():
file = 'data/exoplanets.csv'
data = pd.read_csv(file)
series = data['NAME']
print(f'\n{series}\n{type(series)}')
new_list = [5, 10, 15, 20, 25]
new_series = | pd.Series(new_list) | pandas.Series |
#!/usr/bin/env python3
import pandas as pd
from io import StringIO
import datetime
import json
import geopandas as gp
pd.set_option('display.max_columns', None)
HEADER = {
'LOC': ['Reference',
'Action Code',
'Location Code',
'Name',
'Start',
'End',
'Easting',
'Northing',
'Timing Point',
'Zone',
'STANOX',
'Off-Network indicator',
'Force LPB',],
'NWK': ['Record',
'Action Code',
'Origin',
'Destination',
'Running Line Code',
'Running Line Description',
'Start',
'End',
'Initial direction',
'Final direction',
'Distance',
'DOO (Passenger)',
'DOO (Non-Passenger)',
'RETB',
'Zone',
'Reversible line',
'Power supply type',
'RA',
'Maximum train length',
],
'PIF': ['Type',
'Version',
'Source',
'TOC ID',
'Start',
'End',
'Cycle type',
'Cycle stage',
'Creation',],
'PIT': ['Record',
'REF Record',
'REF Addition',
'REF Change',
'REF Delete',
'TLD Record',
'TLD Addition',
'TLD Change',
'TLD Delete',
'LOC Record',
'LOC Addition',
'LOC Change',
'LOC Delete',
'PLT Record',
'PLT Addition',
'PLT Change',
'PLT Delete',
'NWK Record',
'NWK Addition',
'NWK Change',
'NWK Delete',
'TLK Record',
'TLK Count'
],
'PLT': ['Record',
'Action Code',
'Location Code',
'Platform ID',
'Start date',
'End date',
'Platform/Siding length',
'Power supply type',
'DOO (Passenger)',
'DOO (Non-Passenger)',
],
'REF': ['Record',
'Action',
'Reference type',
'Reference',
'Description',],
'TLD': ['Record Type',
'Action Code',
'Traction type',
'Trailing Load',
'Speed',
'RA/Gauge',
'Description',
'ITPS Power Type',
'ITPS Load',
'Limiting Speed',],
'TLK': ['Record',
'Action Code',
'Origin',
'Destination',
'Running Line Code',
'Traction Type',
'Trailing Load',
'Speed',
'RA/Gauge',
'Entry speed',
'Exit speed',
'Start',
'End',
'Sectional Running Time',
'Description',
],
}
print('Load Geography\t{}'.format(datetime.datetime.now()))
FILENAME = 'Geography_current.txt'
COLS = [(0, 3), (4, 80)]
DF0 = pd.read_fwf(FILENAME, colspecs=COLS, header=None, index_col=False)
RECORDS = list(HEADER.keys())
DATA = {}
for i in RECORDS:
data = [[i] + j.split('\t') for j in DF0[DF0[0] == i][1]]
DATA[i] = | pd.DataFrame(data) | pandas.DataFrame |
import pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.stats as scp_stats
import pandas as pd
import matplotlib
matplotlib.rcParams.update({'font.size': 15})
def box_plot_data(tot_df, label, units, type_order, type_color, y_lim_top, out_fig_name):
# Drop NaN elements.
tmp_df = tot_df[tot_df[label].notnull()]
# Arrange data into a list of numpy arrays.
type_data = []
for type_key in type_order:
type_data.append(tmp_df[tmp_df['type']==type_key][label].values)
fig, ax = plt.subplots(figsize = (7, 5))
box = ax.boxplot(type_data, patch_artist=True, sym='c.') # notch=True
for patch, color in zip(box['boxes'], [type_color[type_key] for type_key in type_order]):
patch.set_facecolor(color)
for i, type_key in enumerate(type_order):
ax.errorbar([i+1], [type_data[i].mean()], yerr=[type_data[i].std() / np.sqrt(1.0 * type_data[i].size)], marker='o', ms=8, color='k', linewidth=2, capsize=5, markeredgewidth=2, ecolor='k', elinewidth=2)
ind = np.where(type_data[i] > y_lim_top)[0]
ax.annotate(u'$\u2191$'+'\n%d/%d' % (ind.size, type_data[i].size), xy=(i+1.2, 1.0*y_lim_top), fontsize=12)
ax.set_ylim((0.0, y_lim_top))
ax.set_xticks(range(1, len(type_order)+1))
ax.set_xticklabels(type_order)
if (units == ''):
ax.set_ylabel('%s' % (label))
else:
ax.set_ylabel('%s (%s)' % (label, units))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(size=10)
plt.savefig(out_fig_name, format='eps')
plt.show()
cell_db_path = '/allen/aibs/mat/antona/network/14-simulations/9-network/analysis/'
# Decide which systems we are doing analysis for.
sys_dict = {}
# sys_dict['ll1'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': cell_db_path + 'Ori/ll1_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll1_pref_stat.csv'}
# sys_dict['ll2'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': cell_db_path + 'Ori/ll2_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll2_pref_stat.csv'}
# sys_dict['ll3'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': cell_db_path + 'Ori/ll3_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll3_pref_stat.csv'}
#sys_dict['rl1'] = { 'cells_file': '../build/rl1.csv', 'f_out': 'Ori/rl1_rates.npy', 'f_out_pref': 'Ori/rl1_pref_stat.csv'}
#sys_dict['rl2'] = { 'cells_file': '../build/rl2.csv', 'f_out': 'Ori/rl2_rates.npy', 'f_out_pref': 'Ori/rl2_pref_stat.csv'}
#sys_dict['rl3'] = { 'cells_file': '../build/rl3.csv', 'f_out': 'Ori/rl3_rates.npy', 'f_out_pref': 'Ori/rl3_pref_stat.csv'}
#sys_dict['lr1'] = { 'cells_file': '../build/lr1.csv', 'f_out': 'Ori/lr1_rates.npy', 'f_out_pref': 'Ori/lr1_pref_stat.csv'}
#sys_dict['lr2'] = { 'cells_file': '../build/lr2.csv', 'f_out': 'Ori/lr2_rates.npy', 'f_out_pref': 'Ori/lr2_pref_stat.csv'}
#sys_dict['lr3'] = { 'cells_file': '../build/lr3.csv', 'f_out': 'Ori/lr3_rates.npy', 'f_out_pref': 'Ori/lr3_pref_stat.csv'}
#sys_dict['rr1'] = { 'cells_file': '../build/rr1.csv', 'f_out': 'Ori/rr1_rates.npy', 'f_out_pref': 'Ori/rr1_pref_stat.csv'}
#sys_dict['rr2'] = { 'cells_file': '../build/rr2.csv', 'f_out': 'Ori/rr2_rates.npy', 'f_out_pref': 'Ori/rr2_pref_stat.csv'}
#sys_dict['rr3'] = { 'cells_file': '../build/rr3.csv', 'f_out': 'Ori/rr3_rates.npy', 'f_out_pref': 'Ori/rr3_pref_stat.csv'}
#sys_dict['ll2_TF4Hz'] = { 'cells_file': '../build/ll2.csv', 'f_out': 'Ori/ll2_rates_4Hz.npy', 'f_out_pref': 'Ori/ll2_pref_stat_4Hz.csv' }
# sys_dict['ll1_LIF'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll1_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll1_pref_stat.csv'}
# sys_dict['ll2_LIF'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll2_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll2_pref_stat.csv'}
# sys_dict['ll3_LIF'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll3_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll3_pref_stat.csv'}
#sys_dict['rl1_LIF'] = { 'cells_file': '../build/rl1.csv', 'f_out': 'Ori_LIF/rl1_rates.npy', 'f_out_pref': 'Ori_LIF/rl1_pref_stat.csv'}
#sys_dict['rl2_LIF'] = { 'cells_file': '../build/rl2.csv', 'f_out': 'Ori_LIF/rl2_rates.npy', 'f_out_pref': 'Ori_LIF/rl2_pref_stat.csv'}
#sys_dict['rl3_LIF'] = { 'cells_file': '../build/rl3.csv', 'f_out': 'Ori_LIF/rl3_rates.npy', 'f_out_pref': 'Ori_LIF/rl3_pref_stat.csv'}
#sys_dict['lr1_LIF'] = { 'cells_file': '../build/lr1.csv', 'f_out': 'Ori_LIF/lr1_rates.npy', 'f_out_pref': 'Ori_LIF/lr1_pref_stat.csv'}
#sys_dict['lr2_LIF'] = { 'cells_file': '../build/lr2.csv', 'f_out': 'Ori_LIF/lr2_rates.npy', 'f_out_pref': 'Ori_LIF/lr2_pref_stat.csv'}
#sys_dict['lr3_LIF'] = { 'cells_file': '../build/lr3.csv', 'f_out': 'Ori_LIF/lr3_rates.npy', 'f_out_pref': 'Ori_LIF/lr3_pref_stat.csv'}
#sys_dict['rr1_LIF'] = { 'cells_file': '../build/rr1.csv', 'f_out': 'Ori_LIF/rr1_rates.npy', 'f_out_pref': 'Ori_LIF/rr1_pref_stat.csv'}
#sys_dict['rr2_LIF'] = { 'cells_file': '../build/rr2.csv', 'f_out': 'Ori_LIF/rr2_rates.npy', 'f_out_pref': 'Ori_LIF/rr2_pref_stat.csv'}
#sys_dict['rr3_LIF'] = { 'cells_file': '../build/rr3.csv', 'f_out': 'Ori_LIF/rr3_rates.npy', 'f_out_pref': 'Ori_LIF/rr3_pref_stat.csv'}
sys_dict['ll1_LIF'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll1_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll1_pref_stat.csv'}
sys_dict['ll2_LIF'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll2_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll2_pref_stat.csv'}
sys_dict['ll3_LIF'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll3_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll3_pref_stat.csv'}
# result_fig_prefix = 'Ori/new_Ori_bio_ll'
# result_fig_prefix = 'Ori/new_Ori_lif1_ll'
result_fig_prefix = 'Ori/new_Ori_lif4_ll'
result_fig_CV_ori = result_fig_prefix + '_CV_ori.eps'
result_fig_DSI = result_fig_prefix + '_DSI.eps'
type_color = {'Scnn1a': 'darkorange', 'Rorb': 'red', 'Nr5a1': 'magenta', 'PV1': 'blue', 'PV2': 'cyan', 'AnL4E': 'gray', 'AwL4E': 'gray', 'AnI': 'gray', 'AwI': 'gray'}
type_order = ['Scnn1a', 'Rorb', 'Nr5a1', 'AnL4E', 'AwL4E', 'PV1', 'PV2', 'AnI', 'AwI']
# Read files with OSI and DSI from simulations.
sim_df = pd.DataFrame()
for sys_name in sys_dict.keys():
tmp_df = pd.read_csv(sys_dict[sys_name]['f_out_pref'], sep=' ')
cells_df = pd.read_csv(sys_dict[sys_name]['cells_file'], sep=' ')
cells_df_1 = pd.DataFrame()
cells_df_1['id'] = cells_df['index'].values
cells_df_1['type'] = cells_df['type'].values
tmp_df = | pd.merge(tmp_df, cells_df_1, on='id', how='inner') | pandas.merge |
"""
This script serves as the primary means for creating
heatmap and scatter plot outputs from the YCOM and census
data. Running this script loads and prepares the raw data,
performs regression calculations, plots the results and
saves these plots.
Creates output files:
"heatmap.html"
"scatter.html"
Example:
To run use:
$ python create_heatmap.py
"""
# Third party imports
from bokeh.io import output_file, show
from bokeh.layouts import column
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import ColumnDataSource, LinearAxis
from bokeh.plotting import figure, save
import pandas as pd
# Local imports
import calculate_statistics
import plot_heatmap
import prepare_data
output_file("heatmap.html")
# Preparing census data
# Loading census data
CENSUS = pd.read_csv('../climops/data/acs2015_county_data.csv')
# Scaling Men, Women, Employed and Citizen by TotalPop to get a percentage
CENSUS = prepare_data.scale_census_variables(CENSUS)
# Removing counties not in ycom data (i.e. puerto rico)
CENSUS = prepare_data.remove_census_not_in_ycom(CENSUS)
# Removing counties not in land area data
CENSUS = prepare_data.remove_not_in_land_area(CENSUS)
# Getting list of census variables
N_CENSUS = list(CENSUS)[3:]
# Preparing YCOM data
# Loading ycom data
YCOM = pd.read_csv('../climops/data/YCOM_2018_Data.csv', encoding='latin-1')
YCOM_META = pd.read_csv('../climops/data/YCOM_2018_Metadata.csv', encoding='latin-1')
# Get county level data matching census county names
YCOM_COUNTY = prepare_data.get_ycom_counties(YCOM)
# Removing counties not in land area data
YCOM_COUNTY = prepare_data.remove_not_in_land_area(YCOM_COUNTY)
# Getting list of YCOM variables
N_YCOM = list(YCOM_COUNTY)[3:-2]
# Editing and getting list of YCOM variable descriptions
YCOM_META = prepare_data.fix_ycom_descriptions(YCOM_META)
N_YCOM_META = list(YCOM_META['VARIABLE DESCRIPTION'])[3:]
# Preparing land area data
# Loading land_area_data
LAND_AREA_DATA = pd.read_excel('../climops/data/LND01.xls')
# Selecting only counties
LAND_AREA_DATA = prepare_data.select_land_area_county(LAND_AREA_DATA)
# Removing rows which are in land area but not census
LAND_AREA_DATA = prepare_data.remove_land_area_not_in_census(LAND_AREA_DATA)
# Fixing land area data county names so that they match those in census data
LAND_AREA_DATA = prepare_data.fix_land_area_county_names(LAND_AREA_DATA, CENSUS)
# Adding land area values where missing
LAND_AREA_DATA = prepare_data.add_missing_land_areas(LAND_AREA_DATA)
# Getting one dataframe from the three datasets
N_CENSUS.append('LogPopDensity')
COMBINED_DATA = prepare_data.join_data(YCOM_COUNTY, CENSUS, LAND_AREA_DATA)
# Generate correlation (R), regression (b) and pvalues for relationships between variables
STATS_OUTPUTS = calculate_statistics.calculate_stats_outputs(N_YCOM, N_CENSUS, YCOM_COUNTY, CENSUS)
STATS_OUTPUTS_STANDARD = calculate_statistics.calculate_stats_outputs_standard(
N_YCOM, N_CENSUS, YCOM_COUNTY, CENSUS)
# Making dataframe of regression coefficients
REGS = pd.DataFrame(STATS_OUTPUTS_STANDARD[:, :, 0], columns=N_CENSUS, index=N_YCOM)
# Making dataframe of correlation coefficients
CORS = | pd.DataFrame(STATS_OUTPUTS[:, :, 2], columns=N_CENSUS, index=N_YCOM) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
.. affiliation::
Laboratory of Protein Design and Immunoengineering <lpdi.epfl.ch>
<NAME> <<EMAIL>>
.. func:: multiple_distributions
"""
# Standard Libraries
import itertools
# External Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# This Library
from rstoolbox.utils import add_top_title, add_column, discrete_cmap_from_colors
__all__ = ['multiple_distributions', 'plot_in_context', 'distribution_quality']
def multiple_distributions( df, fig, grid, igrid=None, values="*", titles=None, labels=None,
refdata=None, ref_equivalences=None, violins=True, legends=False, **kwargs ):
"""Automatically plot boxplot distributions for multiple score types of the
decoy population.
    Apart from the fixed options, the function accepts any option of
    :func:`~seaborn.boxplot`, except for ``y``, ``data`` and ``ax``, which
are used internally by this function.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:param fig: Figure into which the data is going to be plotted.
:type fig: :class:`~matplotlib.figure.Figure`
:param grid: Shape of the grid to plot the values in the figure (rows x columns).
:type grid: :class:`tuple` with two :class:`int`
:param igrid: Initial position of the grid. Defaults to (0, 0)
:type igrid: :class:`tuple` with two :class:`int`
:param values: Contents from the data container that are expected to be plotted.
:type values: :func:`list` of :class:`str`
:param titles: Titles to assign to the value of each plot (if provided).
:type titles: :func:`list` of :class:`str`
:param labels: Y labels to assign to the value of each plot. By default this will be
the name of the value.
:type labels: :func:`list` of :class:`str`
:param refdata: Data content to use as reference.
:type refdata: :class:`~pandas.DataFrame`
:param dict ref_equivalences: When names between the query data and the provided data are the
same, they will be directly assigned. Here a dictionary ``db_name``:``query_name`` can be
provided otherwise.
    :param bool violins: When :data:`True`, plot the refdata comparison with violins, otherwise do it
        with kdeplots.
:param bool legends: When :data:`True`, show the legends of each axis.
:return: :func:`list` of :class:`~matplotlib.axes.Axes`
:raises:
:ValueError: If columns are requested that do not exist in the :class:`~pandas.DataFrame`.
        :ValueError: If the given grid does not have enough positions for all the requested values.
:ValueError: If the number of values and titles do not match.
:ValueError: If the number of values and labels do not match.
:ValueError: If ``refdata`` is not :class:`~pandas.DataFrame`.
.. rubric:: Example 1: Raw design population data.
.. ipython::
:okwarning:
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.plot import multiple_distributions
...: import matplotlib.pyplot as plt
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz")
...: values = ["score", "hbond_sr_bb", "B_ni_rmsd", "hbond_bb_sc",
...: "cav_vol", "design_score", "packstat", "rmsd_drift"]
...: fig = plt.figure(figsize=(25, 10))
...: axs = multiple_distributions(df, fig, (2, 4), values=values)
...: plt.tight_layout()
@savefig multiple_distributions_docs.png width=5in
In [2]: plt.show()
In [3]: plt.close()
.. rubric:: Example 2: Design population data vs. DB reference.
.. ipython::
:okwarning:
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.plot import multiple_distributions
...: from rstoolbox.utils import load_refdata
...: import matplotlib.pyplot as plt
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'sequence': 'A'})
...: slength = len(df.iloc[0]['sequence_A'])
...: refdf = load_refdata('scop2')
...: refdf = refdf[(refdf['length'] >= slength - 5) &
...: (refdf['length'] <= slength + 5)]
...: values = ["score", "hbond_sr_bb", "B_ni_rmsd", "hbond_bb_sc",
...: "cav_vol", "design_score", "packstat", "rmsd_drift"]
...: fig = plt.figure(figsize=(25, 10))
...: axs = multiple_distributions(df, fig, (2, 4), values=values, refdata=refdf)
...: plt.tight_layout()
@savefig multiple_distributions_docs2.png width=5in
In [2]: plt.show()
In [3]: plt.close()
"""
if igrid is None:
igrid = (0, 0)
if values == "*":
values = df.select_dtypes(include=[np.number]).columns.tolist()
if len(set(values).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the data container.")
if (grid[0] * grid[1]) - (igrid[0] * igrid[1]) < len(values):
raise ValueError("The grid does not provide enought positions for all"
" requested values.")
if titles is not None and len(titles) != len(values):
raise ValueError("Number of expected plots and titles do not match.")
if labels is not None and len(labels) != len(values):
raise ValueError("Number of expected labels and titles do not match.")
if refdata is not None:
if not isinstance(refdata, pd.DataFrame):
raise ValueError('Unknown reference data format.')
if ref_equivalences is not None:
refdata = refdata.rename(columns=ref_equivalences)
refvalues = refdata.select_dtypes(include=[np.number]).columns.tolist()
else:
refvalues = []
kwargs.pop("y", None)
kwargs.pop("data", None)
kwargs.pop("axis", None)
axis_args = {'rowspan': kwargs.pop('rowspan', 1), 'colspan': kwargs.pop('colspan', 1)}
axis = []
for _, pgrid in enumerate(itertools.product(*[range(grid[0]), range(grid[1])])):
if _ >= len(values):
break
pgrid = list(pgrid)
pgrid[0] += igrid[0]
pgrid[1] += igrid[1]
ax = plt.subplot2grid(grid, pgrid, fig=fig, rowspan=axis_args['rowspan'])
if values[_] not in refvalues:
sns.boxplot(y=values[_], data=df, ax=ax, **kwargs)
else:
s1 = add_column(pd.DataFrame(df[values[_]]), 'target', 'query')
s1 = add_column(s1, 'violinx', 1)
s2 = add_column(pd.DataFrame(refdata[values[_]]), 'target', 'reference')
s2 = add_column(s2, 'violinx', 1)
qd = pd.concat([s1, s2])
if violins:
sns.violinplot(x='violinx', y=values[_], hue='target', data=qd, ax=ax,
hue_order=["query", "reference"], split=True)
if not legends:
ax.get_legend().remove()
ax.set_xlabel('')
ax.set_xticklabels('')
else:
sns.kdeplot(s1[values[_]], ax=ax, shade=True)
sns.kdeplot(s2[values[_]], ax=ax, shade=True)
if not legends:
ax.get_legend().remove()
ax.set_xlabel(values[_])
if titles is not None:
add_top_title(ax, titles[_])
if labels is not None:
ax.set_ylabel(labels[_])
axis.append(ax)
return axis
def plot_in_context( df, fig, grid, refdata, igrid=None, values='*', ref_equivalences=None,
legends=False, **kwargs ):
"""Plot position of decoys in a backgroud reference dataset.
.. note::
Personalization is possible by providing argument keys for :func:`~seaborn.kdeplot`
with the prefix ``kde_`` and for :func:`~matplotlib.plot` (for the points) with the
prefix ``point_``.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:param fig: Figure into which the data is going to be plotted.
:type fig: :class:`~matplotlib.figure.Figure`
:param grid: Shape of the grid to plot the values in the figure (rows x columns).
:type grid: :class:`tuple` with two :class:`int`
:param refdata: Data content to use as reference.
:type refdata: :class:`~pandas.DataFrame`
:param igrid: Initial position of the grid. Defaults to (0, 0)
:type igrid: :class:`tuple` with two :class:`int`
:param values: Contents from the data container that are expected to be plotted.
:type values: :func:`list` of :class:`str`
:param dict ref_equivalences: When names between the query data and the provided data are the
same, they will be directly assigned. Here a dictionary ``db_name``:``query_name`` can be
provided otherwise.
:param bool legends: When :data:`True`, show the legends of each axis.
:return: :func:`list` of :class:`~matplotlib.axes.Axes`
:raises:
:ValueError: If columns are requested that do not exist in the :class:`~pandas.DataFrame` of
data **and** reference.
        :ValueError: If the given grid does not have enough positions for all the requested values.
:ValueError: If ``refdata`` or ``df`` are not :class:`~pandas.DataFrame`.
.. rubric:: Example:
.. ipython::
:okwarning:
In [1]: from rstoolbox.plot import plot_in_context
...: from rstoolbox.utils import load_refdata
...: import matplotlib.pyplot as plt
...: df = load_refdata('scop2')
...: slength = 100
...: refdf = load_refdata('scop2', 50)
...: refdf = refdf[(refdf['length'] >= slength - 5) &
...: (refdf['length'] <= slength + 5)]
...: values = ["score", "hbond_sr_bb", "avdegree", "hbond_bb_sc",
...: "cavity", "CYDentropy", "pack", "radius"]
...: fig = plt.figure(figsize=(25, 10))
...: axs = plot_in_context(df, fig, (2, 4), refdata=refdf, values=values)
...: plt.tight_layout()
@savefig plot_in_context_docs1.png width=5in
In [2]: plt.show()
In [3]: plt.close()
"""
if igrid is None:
igrid = (0, 0)
if not isinstance(df, pd.DataFrame):
raise ValueError('Unknown data format.')
if not isinstance(refdata, pd.DataFrame):
raise ValueError('Unknown reference data format.')
if values == "*":
values = df.select_dtypes(include=[np.number]).columns.tolist()
if ref_equivalences is not None:
refdata = refdata.rename(columns=ref_equivalences)
if len(set(values).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the data container.")
if len(set(values).difference(set(list(refdata.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the reference data container.")
if (grid[0] * grid[1]) - (igrid[0] * igrid[1]) < len(values):
raise ValueError("The grid does not provide enought positions for all"
" requested values.")
pk = [k for k in kwargs if k.startswith('point_')]
kk = [k for k in kwargs if k.startswith('kde_')]
kwargs_point = {}
kwargs_kde = {}
for p in pk:
kwargs_point.setdefault(p.replace('point_', ''), kwargs[p])
kwargs_point.setdefault('marker', 'o')
kwargs_point.setdefault('color', 'orange')
for k in kk:
kwargs_kde.setdefault(k.replace('kde_', ''), kwargs[k])
kwargs_kde.setdefault('shade', True)
axis = []
for _, pgrid in enumerate(itertools.product(*[range(grid[0]), range(grid[1])])):
pgrid = list(pgrid)
pgrid[0] += igrid[0]
pgrid[1] += igrid[1]
if _ >= len(values):
break
ax = plt.subplot2grid(grid, pgrid, fig=fig)
kde = sns.kdeplot(refdata[values[_]], ax=ax, **kwargs_kde)
data_x, data_y = kde.lines[0].get_data()
for __, row in df.iterrows():
ref_x = row[values[_]]
ref_y = np.interp(ref_x, data_x, data_y)
kde.plot([ref_x], [ref_y], **kwargs_point)
if not legends:
ax.get_legend().remove()
ax.set_xlabel(values[_])
axis.append(ax)
return axis
def distribution_quality( df, refdata, values, ascending, names, fig):
"""Locate the quantile position of each putative :class:`.DesingSerie`
in a list of score distributions.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:param grid: Shape of the grid to plot the values in the figure (rows x columns).
:type grid: :class:`tuple` with two :class:`int`
:param refdata: Data content to use as reference.
:type refdata: :class:`~pandas.DataFrame`
:param values: Contents from the data container that are expected to be plotted.
:type values: :func:`list` of :class:`str`
:param ascending: Way the data should be sorted. :data:`True` if the score is better
when lower, :data:`False` otherwise.
:type ascending: :func:`list` of :class:`bool`
:param names: Columns to use as identifiers for the query data.
:type names: :func:`list` of :class:`str`
:param fig: Figure into which the data is going to be plotted.
:type fig: :class:`~matplotlib.figure.Figure`
:return: :class:`~matplotlib.axes.Axes`
:raises:
:ValueError: If columns are requested that do not exist in the :class:`~pandas.DataFrame` of
data **and** reference.
:ValueError: If there isn't a ``ascending`` definition for each ``value``.
:ValueError: If ``refdata`` or ``df`` are not :class:`~pandas.DataFrame`.
        :ValueError: If the requested names do not exist in the input data.
.. rubric:: Example:
.. ipython::
:okwarning:
In [1]: from rstoolbox.plot import distribution_quality
...: from rstoolbox.utils import load_refdata
...: import matplotlib.pyplot as plt
...: df = load_refdata('scop')
...: qr = pd.DataFrame([['2F4V', 'C'], ['3BFU', 'B'], ['2APJ', 'C'],
...: ['2C37', 'V'], ['2I6E', 'H']],
...: columns=['pdb', 'chain'])
...: qr = qr.merge(df, on=['pdb', 'chain'])
...: refs = []
...: for i, t in qr.iterrows():
...: refs.append(df[(df['length'] >= (t['length'] - 5)) &
...: (df['length'] <= (t['length'] + 5))])
...: fig = plt.figure(figsize=(25, 6))
...: ax = distribution_quality(df=qr, refdata=refs,
...: values=['score', 'pack', 'avdegree',
...: 'cavity', 'psipred'],
...: ascending=[True, False, True, True, False],
...: names=['pdb', 'chain'], fig=fig)
...: plt.tight_layout()
@savefig distribution_quality_docs1.png width=5in
In [2]: plt.show()
In [3]: plt.close()
"""
if not isinstance(df, pd.DataFrame):
raise ValueError('Unknown data format.')
if not isinstance(refdata, (pd.DataFrame, list)):
raise ValueError('Unknown reference data format.')
if len(set(values).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the data container.")
if len(set(names).difference(set(list(df.columns)))) > 0:
raise ValueError("Some of the requested identifiers do not exist "
"in the data container.")
if isinstance(refdata, list):
if len(refdata) != df.shape[0]:
            raise ValueError('If multiple references are provided, '
                             'there should be as many of them as there are queries.')
for i, x in enumerate(refdata):
if not isinstance(x, pd.DataFrame):
raise ValueError('Unknown reference {} data format.'.format(i))
if len(set(values).difference(set(list(x.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the {} reference container.".format(i))
else:
if len(set(values).difference(set(list(refdata.columns)))) > 0:
raise ValueError("Some of the requested values do not exist "
"in the {} reference container.".format(i))
refdata = [refdata, ] * len(df.shape[0])
if len(values) != len(ascending):
raise ValueError("Number of values and orders should match.")
ax = plt.subplot2grid((1, 1), (0, 0), fig=fig)
cmap = discrete_cmap_from_colors([(144.0 / 255, 238.0 / 255, 144.0 / 255),
(135.0 / 255, 206.0 / 255, 250.0 / 255),
(255.0 / 255, 165.0 / 255, 0.0 / 255),
(205.0 / 255, 92.0 / 255, 92.0 / 255)])
data = []
labs = []
identifiers = df[names[0]].map(str)
for i in range(1, len(names)):
identifiers += '_' + df[names[i]].map(str)
df = df.reset_index(drop=True)
for i, row in df.iterrows():
data.append([])
labs.append([])
for isc, sc in enumerate(values):
qt = refdata[i][sc].quantile([.25, .5, .75])
if row[sc] <= qt[.25]:
data[-1].append(.12 if ascending[isc] else .87)
labs[-1].append('Q1' if ascending[isc] else 'Q4')
elif row[sc] <= qt[.5]:
data[-1].append(.37 if ascending[isc] else .67)
labs[-1].append('Q2' if ascending[isc] else 'Q3')
elif row[sc] <= qt[.75]:
data[-1].append(.67 if ascending[isc] else .37)
labs[-1].append('Q3' if ascending[isc] else 'Q2')
else:
data[-1].append(.87 if ascending[isc] else .12)
labs[-1].append('Q4' if ascending[isc] else 'Q1')
    df = pd.DataFrame(data, columns=values, index=identifiers)
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 17:45:11 2018
@author: <NAME>
@e-mail: <EMAIL>
Program for analysis and creation of fragmentation diagrams in mass spectrometry out of .csv files
"""
import os
import time
from tkinter import filedialog
import pandas as pd
import numpy as np
from numpy import trapz
from scipy.signal import savgol_filter
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import pickle as pl
xMin = 15
xMax = 30
stepWidth = 1
def prepare_data(ms_info):
global filepath
data = pd.io.parsers.read_csv(filepath)
data.drop(data[data.m > (ms_info + 2)].index, inplace=True)
data.drop(data[data.m < (ms_info - 1)].index, inplace=True)
#data.intensity = savgol_filter(data.intensity, 23, 6, mode='wrap')
#data.intensity = savgol_filter(data.intensity, 21, 7, mode='nearest')
global highest_value_overall
global ms_info_overall
highest_value = 0
scan = 0
index = 0
d = {'scan': [scan],
'intensity': [highest_value]}
data_new = pd.DataFrame(d)
data_new_scaled = pd.DataFrame(d)
for index, row in data.iterrows():
scan_new = row['scan']
if scan_new == scan:
highest_value_new = row['intensity']
if highest_value_new > highest_value:
highest_value = highest_value_new
else:
d = {'scan': [scan],
'intensity': [highest_value]}
data_new = data_new.append(pd.DataFrame(d))
scan = scan_new
highest_value = 0
data_new = data_new.iloc[2:]
data_new.intensity = savgol_filter(data_new.intensity, 11, 6, mode='nearest')
if ms_info < ms_info_overall:
data_new['intensity'].iloc[0] = 0
for index, row in data_new.iterrows():
highest_value = row['intensity']
if highest_value >= highest_value_overall:
highest_value_overall = highest_value
for i, row in data_new.iterrows():
scan = row['scan']
highest_value = row['intensity']
d = {'scan': [scan],
'intensity': [(highest_value/highest_value_overall)*100]}
data_new_scaled = data_new_scaled.append(pd.DataFrame(d))
data_new_scaled = data_new_scaled.iloc[2:]
if ms_info < ms_info_overall:
data_new_scaled['intensity'].iloc[0] = 0
return data_new, data_new_scaled
def plot_diag(catab, plant, category, version, catabolite, fragmentation_mode):
global time
fig_1 = plt.figure(1)
ax = plt.axes()
ax.yaxis.grid()
overall_length = 0
dataframe = pd.DataFrame()
    dataframe_scaled = pd.DataFrame()
import numpy as np
import pandas
import random
import re
import sys
from scipy.stats import pearsonr, spearmanr
# ausiliary functions
def buildSeriesByCategory(df, categories):
res = []
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
res.append(occ)
res_series = pandas.Series(res, index=categories)
return res_series
def fillList(series):
list = []
for label, value in series.items():
if label != "reacted":
num = value
while num > 0:
list.append(label)
num = num - 1
return pandas.Series(list)
def printStatsByCategory(df, categories):
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
print(cat + ": " + str(occ))
def buildOverallResultSeries(resulting_series):
res_list = []
for label, serie in resulting_series.items():
sum = 0;
for value in serie:
sum += value
res_list.append(sum)
return res_list
def fillTwoLists(typ, cat, labels_string, cat_list, label_list):
labels = labels_string.split(",")
for label in labels:
if label != 'nan':
cat_list.append(cat)
label_list.append(typ + ":" + label.strip())
def computeProjectSet(list, splitRule, categories):
resulting_set = set()
base_link = "https://gitlab.com"
for name in list:
if splitRule != "":
splittedName = name.split(splitRule)
new_name = base_link
for token in splittedName:
if token in categories:
break
else:
token = re.sub(r"[ <>#%\"{}|^'`;\[\]/?:@&=+$,\.()\\\\]", my_replace, token)
new_name += "/" + token
resulting_set.add(new_name)
return resulting_set
def my_replace(match):
return "-"
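# Hypothetical sanity check (the project name, split rule and category below are
# made up, not taken from the dataset): computeProjectSet() appends the split
# tokens to the gitlab.com base URL until it hits a category token, replacing
# unsafe characters with '-'.
if __name__ == "__main__":
    example = computeProjectSet(["gitlab-org > gitlab > bug"], " > ", ["bug"])
    print(example)  # {'https://gitlab.com/gitlab-org/gitlab'}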
def recallSample(start_data_file, rqone_file, rqtwo_file, output_file, categories):
with open(start_data_file) as dataset_reader, open(rqone_file) as rqone_reader, open(rqtwo_file) as rqtwo_reader:
dataset = pandas.read_csv(dataset_reader)
# hasYaml hasRequirements hasPoms
dataset = dataset.loc[dataset['hasYaml']]
        rqone = pandas.read_csv(rqone_reader)
"""
Copyright 2018 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
import bbi
import cooler
import numpy as np
import os
import pandas as pd
TILE_SIZE = 1024
TILESET_INFO = {
"filetype": "bigbed",
"datatype": "bedlike",
"coordSystem": "hg19",
"coordSystem2": "hg19",
}
FILE_EXT = {"bigbed", "bb"}
def is_bigwig(filepath=None, filetype=None):
if filetype == "bigbed":
return True
filename, file_ext = os.path.splitext(filepath)
if file_ext[1:].lower() in FILE_EXT:
return True
return False
def get_quadtree_depth(chromsizes):
tile_size_bp = TILE_SIZE
min_tile_cover = np.ceil(sum(chromsizes) / tile_size_bp)
return int(np.ceil(np.log2(min_tile_cover)))
def get_zoom_resolutions(chromsizes):
return [2 ** x for x in range(get_quadtree_depth(chromsizes) + 1)][::-1]
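# Hypothetical usage sketch (the chromosome sizes below are invented, not a real
# assembly): with ~3.1 Gb of sequence and 1024-bp tiles the quadtree needs
# ceil(log2(3.1e9 / 1024)) = 22 levels, so resolutions run from 2**22 down to 1.
if __name__ == "__main__":
    fake_chromsizes = pd.Series({"chr1": 2_500_000_000, "chr2": 600_000_000})
    print(get_quadtree_depth(fake_chromsizes))        # 22
    print(get_zoom_resolutions(fake_chromsizes)[:3])  # [4194304, 2097152, 1048576]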
def get_chromsizes(bwpath):
"""
TODO: replace this with negspy
Also, return NaNs from any missing chromosomes in bbi.fetch
"""
chromsizes = bbi.chromsizes(bwpath)
chromosomes = cooler.util.natsorted(chromsizes.keys())
    return pd.Series(chromsizes)[chromosomes]  # [chromosomes] assumed: reorder by natural sort (tail of line truncated in source)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import json
from matplotlib import pyplot as plt
import numpy as np
from numpy.fft import fft, fftfreq
# Configuration
anomaly_color = 'sandybrown'
prediction_color = 'yellowgreen'
training_color = 'yellowgreen'
validation_color = 'gold'
test_color = 'coral'
figsize=(9, 3)
autoclose = True
def load_series(file_name, data_folder):
# Load the input data
data_path = f'{data_folder}/data/{file_name}'
data = pd.read_csv(data_path)
data['timestamp'] = pd.to_datetime(data['timestamp'])
data.set_index('timestamp', inplace=True)
# Load the labels
label_path = f'{data_folder}/labels/combined_labels.json'
with open(label_path) as fp:
labels = pd.Series(json.load(fp)[file_name])
labels = pd.to_datetime(labels)
# Load the windows
window_path = f'{data_folder}/labels/combined_windows.json'
window_cols = ['begin', 'end']
with open(window_path) as fp:
windows = pd.DataFrame(columns=window_cols,
data=json.load(fp)[file_name])
windows['begin'] = pd.to_datetime(windows['begin'])
windows['end'] = pd.to_datetime(windows['end'])
# Return data
return data, labels, windows
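# Hypothetical usage sketch: the file name and folder below are placeholders for
# a local checkout of the NAB benchmark, laid out as assumed by load_series()
# (data/<file>, labels/combined_labels.json, labels/combined_windows.json).
# data, labels, windows = load_series('realKnownCause/nyc_taxi.csv', '/path/to/NAB')
# plot_series(data, labels=labels, windows=windows)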
def plot_series(data, labels=None,
windows=None,
predictions=None,
highlights=None,
val_start=None,
test_start=None,
figsize=figsize,
show_sampling_points=False,
show_markers=False,
filled_version=None):
# Open a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot data
if not show_markers:
plt.plot(data.index, data.values, zorder=0)
else:
plt.plot(data.index, data.values, zorder=0,
marker='.', markersize=3)
if filled_version is not None:
filled = filled_version.copy()
filled[~data['value'].isnull()] = np.nan
plt.scatter(filled.index, filled,
marker='.', c='tab:orange', s=5);
if show_sampling_points:
vmin = data.min()
lvl = np.full(len(data.index), vmin)
plt.scatter(data.index, lvl, marker='.',
c='tab:red', s=5)
# Rotated x ticks
plt.xticks(rotation=45)
# Plot labels
if labels is not None:
plt.scatter(labels.values, data.loc[labels],
color=anomaly_color, zorder=2, s=5)
# Plot windows
if windows is not None:
for _, wdw in windows.iterrows():
plt.axvspan(wdw['begin'], wdw['end'],
color=anomaly_color, alpha=0.3, zorder=1)
# Plot training data
if val_start is not None:
plt.axvspan(data.index[0], val_start,
color=training_color, alpha=0.1, zorder=-1)
if val_start is None and test_start is not None:
plt.axvspan(data.index[0], test_start,
color=training_color, alpha=0.1, zorder=-1)
if val_start is not None:
plt.axvspan(val_start, test_start,
color=validation_color, alpha=0.1, zorder=-1)
if test_start is not None:
plt.axvspan(test_start, data.index[-1],
color=test_color, alpha=0.3, zorder=0)
# Predictions
if predictions is not None:
plt.scatter(predictions.values, data.loc[predictions],
color=prediction_color, alpha=.4, zorder=3,
s=5)
plt.tight_layout()
def plot_autocorrelation(data, max_lag=100, figsize=figsize):
# Open a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Autocorrelation plot
pd.plotting.autocorrelation_plot(data['value'])
# Customized x limits
plt.xlim(0, max_lag)
# Rotated x ticks
plt.xticks(rotation=45)
plt.tight_layout()
def plot_histogram(data, bins=10, vmin=None, vmax=None, figsize=figsize):
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot a histogram
plt.hist(data, density=True, bins=bins)
# Update limits
lims = plt.xlim()
if vmin is not None:
lims = (vmin, lims[1])
if vmax is not None:
lims = (lims[0], vmax)
plt.xlim(lims)
plt.tight_layout()
def plot_histogram2d(xdata, ydata, bins=10, figsize=figsize):
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot a histogram
plt.hist2d(xdata, ydata, density=True, bins=bins)
plt.tight_layout()
def plot_density_estimator_1D(estimator, xr, figsize=figsize):
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot the estimated density
xvals = xr.reshape((-1, 1))
dvals = np.exp(estimator.score_samples(xvals))
plt.plot(xvals, dvals)
plt.tight_layout()
def plot_density_estimator_2D(estimator, xr, yr, figsize=figsize):
# Plot the estimated density
nx = len(xr)
ny = len(yr)
xc = np.repeat(xr, ny)
yc = np.tile(yr, nx)
data = np.vstack((xc, yc)).T
dvals = np.exp(estimator.score_samples(data))
dvals = dvals.reshape((nx, ny))
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
plt.pcolor(dvals)
plt.tight_layout()
# plt.xticks(np.arange(0, len(xr)), xr)
# plt.yticks(np.arange(0, len(xr)), yr)
def plot_distribution_2D(f, xr, yr, figsize=figsize):
# Build the input
nx = len(xr)
ny = len(yr)
xc = np.repeat(xr, ny)
yc = np.tile(yr, nx)
data = np.vstack((xc, yc)).T
dvals = np.exp(f.pdf(data))
dvals = dvals.reshape((nx, ny))
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
plt.pcolor(dvals)
plt.tight_layout()
xticks = np.linspace(0, len(xr), 6)
xlabels = np.linspace(xr[0], xr[-1], 6)
plt.xticks(xticks, xlabels)
yticks = np.linspace(0, len(yr), 6)
ylabels = np.linspace(yr[0], yr[-1], 6)
plt.yticks(yticks, ylabels)
def get_pred(signal, thr):
return pd.Series(signal.index[signal >= thr])
def get_metrics(pred, labels, windows):
tp = [] # True positives
fp = [] # False positives
fn = [] # False negatives
advance = [] # Time advance, for true positives
# Loop over all windows
used_pred = set()
for idx, w in windows.iterrows():
# Search for the earliest prediction
pmin = None
for p in pred:
if p >= w['begin'] and p < w['end']:
used_pred.add(p)
if pmin is None or p < pmin:
pmin = p
# Compute true pos. (incl. advance) and false neg.
l = labels[idx]
if pmin is None:
fn.append(l)
else:
tp.append(l)
advance.append(l-pmin)
# Compute false positives
for p in pred:
if p not in used_pred:
fp.append(p)
# Return all metrics as pandas series
return pd.Series(tp), \
        pd.Series(fp), pd.Series(fn), pd.Series(advance)  # fn/advance restored from context (line truncated in source)
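# Hypothetical end-to-end sketch: assumes `data`, `labels` and `windows` were
# loaded as in the load_series() sketch above; the detector and threshold are
# placeholders, not values from this module.
# signal = data['value'].rolling(48).std()        # toy anomaly score
# pred = get_pred(signal, signal.quantile(0.99))  # flag the top 1% of scores
# tp, fp, fn, advance = get_metrics(pred, labels, windows)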
import datetime
from distutils import dir_util
import os
import pandas as pd
import shutil
import warnings
def copytree(src, dst, symlinks=False, ignore=None):
'''
Function to copy directories.
'''
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def save(files, experiment_name='', params={}, scores={}, other={},
comments='', update_html_flag=False, working_dir='',
kinoa_dir_name='__kinoa__', use_spaces=False,
sort_log_by='experiment_datetime', sort_log_ascending=True,
columns_order=[]):
'''
Function to save experiment.
Inputs:
- files (list of str) - List of files and directories to save.
- experiment_name (str) - String with name of an experiment. If empty - date and time used
to define the name in a format %Y-%m-%d_%H-%M-%S.
- params (dict) - Dictionary with parameters of experiment.
- scores (dict) - Dictionary with evaluation results.
- other (dict) - Dictionary with other data needed in log.
- comments (str) - String with comments to the experiment.
- working_dir (str) - Path to the directory, where log of experiments will be stored. kinoa_dir_name directory
will be created within working_dir.
- kinoa_dir_name (str) - Name of the directory, where logs will be stored.
- use_spaces (bool) - Flag if spaces should be used in a directory name for current experiment.
- sort_log_by (str or list of str) - Specify which columns to use to sort rows in the log
file.
- sort_log_ascending (bool or list of bool) - Sort ascending vs. descending. Specify list
for multiple sort orders. If this is a list of bools, must match the length of the
sort_log_by.
- columns_order (list of str or dict in format ('col_name': index)) - Specify order of
columns in the log file. Columns that are not present in columns_order will remain in the
file, but after specified columns.
'''
# Get date time of experiment log
now = datetime.datetime.now()
experiment_datetime = str(now.strftime('%Y-%m-%d_%H-%M-%S'))
if len(experiment_name) == 0:
experiment_name = experiment_datetime
# Define delimiter for new directories
if use_spaces:
delimiter = ' '
else:
delimiter = '_'
experiment_name = experiment_name.replace(' ', delimiter)
# Define directory name for current experiment
if len(working_dir) == 0:
if experiment_name == experiment_datetime:
working_dir = os.path.join(kinoa_dir_name, experiment_datetime)
else:
working_dir = os.path.join(kinoa_dir_name, experiment_datetime +
delimiter + experiment_name)
else:
if experiment_name == experiment_datetime:
working_dir = os.path.join(working_dir, kinoa_dir_name, experiment_datetime)
else:
working_dir = os.path.join(working_dir, kinoa_dir_name, experiment_datetime +
delimiter + experiment_name)
if not os.path.exists(working_dir):
os.makedirs(working_dir)
# Copy files and directories
if isinstance(files, list):
for file in files:
# print(file)
if os.path.isdir(file):
copytree(file, os.path.join(working_dir, file))
else:
file_dir = os.path.dirname(file)
if len(file_dir) > 0:
if not os.path.exists(os.path.join(working_dir, file_dir)):
os.makedirs(os.path.join(working_dir, file_dir))
shutil.copy2(file, os.path.join(working_dir, file))
# Prepare experiment description
experiment_dict = {
'experiment_name': experiment_name,
'experiment_datetime': experiment_datetime,
'comments': comments
}
header_cols = ['experiment_datetime', 'experiment_name', 'comments']
# Update dictionaries
params_cols = []
for k in params.keys():
col_name = 'params.' + str(k)
experiment_dict[col_name] = params[k]
params_cols.append(col_name)
scores_cols = []
for k in scores.keys():
col_name = 'scores.' + str(k)
experiment_dict['scores.' + str(k)] = scores[k]
scores_cols.append(col_name)
other_cols = []
for k in other.keys():
col_name = 'other.' + str(k)
experiment_dict['other.' + str(k)] = other[k]
other_cols.append(col_name)
# Append experiment to experiments log
log_fname = os.path.join(kinoa_dir_name, 'log.csv')
if os.path.isfile(log_fname):
        log_df = pd.read_csv(log_fname)
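# Hypothetical usage sketch for save(), with placeholder file names, parameters
# and scores that are not taken from any real experiment:
# save(files=['train.py', 'config/'],
#      experiment_name='baseline',
#      params={'lr': 1e-3, 'epochs': 10},
#      scores={'val_acc': 0.91},
#      comments='first run')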
import torch
import torch.nn as nn
import time
import errno
import os
import gc
import pickle
import shutil
import json
import os
import pandas as pd
from skimage import io, transform
import numpy as np
import calculate_ap_classwise as ap
import matplotlib.pyplot as plt
import random
import helpers_preprocess as helpers_pre
import pred_vis as viss
import proper_inferance_file as proper
from tqdm import tqdm
import post_test
with open('../infos/directory.json') as fp: all_data_dir = json.load(fp)
pd.options.display.max_columns = 50 # None -> No Restrictions
pd.options.display.max_rows = 200 # None -> Be careful with
with open(all_data_dir + "hico_infos/mask.pickle", 'rb') as fp:
u = pickle._Unpickler(fp)
u.encoding = 'latin1'
mask_f = u.load()
#mask_f = pickle.load(fp)
mask_t = mask_f[0]
count_t = mask_f[1]
sigmoid = nn.Sigmoid()
### Fixing Seeds#######
device = torch.device("cuda")
seed = 10
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
softmax = nn.Softmax()
##########################################
########## Paramets for person to object class mapping#####
SCORE_TH = 0.6
SCORE_OBJ = 0.3
epoch_to_change = 400
thresh_hold = -1
##############################################################
#### Loss functions Definition########
loss_com = nn.BCEWithLogitsLoss(reduction='sum')
loss_com_class = nn.BCEWithLogitsLoss(reduction='none')
loss_com_combine = nn.BCELoss(reduction='none')
loss_com_single = nn.BCEWithLogitsLoss(reduction='sum')
##################################
# import pdb;pdb.set_trace()
no_of_classes = 117
##Helper Functions##
### Fixing the seed for all threads#####
def _init_fn(worker_id):
np.random.seed(int(seed))
#######Extending Number of People############
def extend(inputt, extend_number):
# import pdb;pdb.set_trace()
res = np.zeros([1, np.shape(inputt)[-1]])
for a in inputt:
x = np.repeat(a.reshape(1, np.shape(inputt)[-1]), extend_number, axis=0)
res = np.concatenate([res, x], axis=0)
# import pdb;pdb.set_trace()
return res[1:]
######################################
####### Extening Number of Objects##########
def extend_object(inputt, extend_number):
# import pdb;pdb.set_trace()
res = np.zeros([1, np.shape(inputt)[-1]])
# import pdb;pdb.set_trace()
x = np.array(inputt.tolist() * extend_number)
# import pdb;pdb.set_trace()
res = np.concatenate([res, x], axis=0)
# import pdb;pdb.set_trace()
return res[1:]
#############################################
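# Hypothetical sanity check (boxes below are made up): with 2 persons and 3
# objects, person boxes are repeated per object and object boxes are tiled per
# person, so row i of each extended array describes one person-object pair.
if __name__ == "__main__":
    _persons = np.array([[0., 0., 1., 1.], [2., 2., 3., 3.]])
    _objects = np.array([[5., 5., 6., 6.], [7., 7., 8., 8.], [9., 9., 10., 10.]])
    print(extend(_persons, 3).shape)         # (6, 4) - each person repeated 3x
    print(extend_object(_objects, 2).shape)  # (6, 4) - object list tiled twice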
############## Filtering results for preparing the output as per as v-coco###############################
def filtering(predicted_HOI, true, persons_np, objects_np, filters, pairs_info, image_id, class_ids):
res1 = np.zeros([1, no_of_classes])
res2 = np.zeros([1, no_of_classes])
res3 = np.zeros([1, no_of_classes])
res4 = np.zeros([1, 4])
res5 = np.zeros([1, 4])
res6 = np.zeros([1, 1])
dict1 = {}
a = 0
increment = [int(i[0] * i[1]) for i in pairs_info]
# import pdb;pdb.set_trace()
start = 0
for index, i in enumerate(filters):
res1 = np.concatenate([res1, predicted_HOI[index].reshape(1, no_of_classes)], axis=0)
res2 = np.concatenate([res2, true[index].reshape(1, no_of_classes)], axis=0)
res3 = np.concatenate([res3, predicted_HOI[index].reshape(1, no_of_classes)], axis=0)
res4 = np.concatenate([res4, persons_np[index].reshape(1, 4)], axis=0)
res5 = np.concatenate([res5, objects_np[index].reshape(1, 4)], axis=0)
res6 = np.concatenate([res6, class_ids[index].reshape(1, 1)], axis=0)
if index == start + increment[a] - 1:
# import pdb;pdb.set_trace()
dict1[int(image_id[a]), 'score'] = res3[1:]
dict1[int(image_id[a]), 'pers_bbx'] = res4[1:]
dict1[int(image_id[a]), 'obj_bbx'] = res5[1:]
dict1[int(image_id[a]), 'class_ids'] = res6[1:]
res3 = np.zeros([1, no_of_classes])
res4 = np.zeros([1, 4])
res5 = np.zeros([1, 4])
res6 = np.zeros([1, 1])
start += increment[a]
a += 1
# import pdb;pdb.set_trace()
return dict1
#### Saving CheckPoint##########
def save_checkpoint(state, filename='checkpoint.pth.tar'):
torch.save(state, filename)
###################################
### LIS function from https://github.com/DirtyHarryLYL/Transferable-Interactiveness-Network##########
def LIS(x, T, k, w):
return T / (1 + np.exp(k - w * x))
#####################################################################################################
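# Hypothetical sanity check (scores are made up): with the T=8.3, k=12, w=10
# setting used in this script, LIS applies a steep logistic rescaling, so low
# detection confidences are suppressed far more strongly than high ones.
if __name__ == "__main__":
    print(LIS(np.array([0.1, 0.5, 0.9, 1.0]), 8.3, 12, 10))
    # approx. [1.4e-04, 7.6e-03, 0.39, 0.99]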
def train_test(model, optimizer, scheduler, dataloader, number_of_epochs, break_point, saving_epoch, folder_name,
batch_size, infr, start_epoch, mean_best, visualize):
#### Creating the folder where the results would be stored##########
try:
os.mkdir(folder_name)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
file_name = folder_name + '/' + 'result.pickle'
####################################################################
loss_epoch_train = []
loss_epoch_val = []
loss_epoch_test = []
initial_time = time.time()
result = []
##### Freeing out the cache memories from gpus and declaring the phases######
torch.cuda.empty_cache()
phases = ['train', 'test']
if infr == 't' and visualize == 'f': ### If running from a pretrained model only for saving best result ######
start_epoch = start_epoch - 1
phases = ['test']
end_of_epochs = start_epoch + 1
print('Only doing testing for storing result from a model')
elif visualize != 'f':
if visualize not in phases:
            print("ERROR! Asked to show results from an unknown set. The choice should be among train, val, test")
return
else:
phases = [visualize]
end_of_epochs = start_epoch + 1
print('Only showing predictions from a model')
else:
end_of_epochs = start_epoch + number_of_epochs
##### Starting the Epochs#############
for epoch in range(start_epoch, end_of_epochs):
scheduler.step()
print('Epoch {}/{}'.format(epoch + 1, end_of_epochs))
print('-' * 10)
# print('Lr: {}'.format(scheduler.get_lr()))
initial_time_epoch = time.time()
for phase in phases:
if phase == 'train':
model.train()
else:
model.eval()
print('In {}'.format(phase))
detections_train = {}
detections_test = {}
true_scores_class = np.ones([1, 80], dtype=int)
true_scores = np.ones([1, no_of_classes], dtype=int)
true_scores_single = np.ones([1, 1], dtype=int)
predicted_scores = np.ones([1, no_of_classes], dtype=float)
predicted_scores_single = np.ones([1, 1], dtype=float)
predicted_scores_class = np.ones([1, 80], dtype=float)
acc_epoch = 0
iteration = 1
torch.cuda.empty_cache()
####Starting the iterations##################
for iterr, i in enumerate(tqdm(dataloader[phase])):
if iterr % 20 == 0:
torch.cuda.empty_cache()
inputs = i[0].to(device)
labels = i[1].to(device)
labels_single = i[2].to(device)
image_id = i[3]
pairs_info = i[4]
minbatch_size = len(pairs_info)
optimizer.zero_grad()
if phase == 'train':
nav = torch.tensor([[0, epoch]] * minbatch_size).to(device)
else:
nav = torch.tensor([[2, epoch]] * minbatch_size).to(device)
# import pdb;pdb.set_trace()
true = (labels.data).cpu().numpy()
true_single = (labels_single.data).cpu().numpy()
with torch.set_grad_enabled(phase == 'train' or phase == 'val'):
model_out = model(inputs, pairs_info, pairs_info, image_id, nav, phase)
outputs = model_out[0]
outputs_single = model_out[1]
outputs_combine = model_out[2]
outputs_gem = model_out[3]
predicted_HOI = sigmoid(outputs).data.cpu().numpy()
predicted_HOI_combine = sigmoid(outputs_combine).data.cpu().numpy()
predicted_single = sigmoid(outputs_single).data.cpu().numpy()
predicted_gem = sigmoid(outputs_gem).data.cpu().numpy()
predicted_HOI_pair = predicted_HOI
start_index = 0
start_obj = 0
start_pers = 0
start_tot = 0
pers_index = 1
persons_score_extended = np.zeros([1, 1])
objects_score_extended = np.zeros([1, 1])
class_ids_extended = np.zeros([1, 1])
persons_np_extended = np.zeros([1, 4])
objects_np_extended = np.zeros([1, 4])
start_no_obj = 0
class_ids_total = []
############# Extending Person and Object Boxes and confidence scores to Multiply with all Pairs##########
for batch in range(len(pairs_info)):
persons_score = []
objects_score = []
class_ids = []
objects_score.append(float(1))
this_image = int(image_id[batch])
scores_total = helpers_pre.get_compact_detections(this_image, phase)
persons_score, objects_score, persons_np, objects_np, class_ids = scores_total[
'person_bbx_score'], \
scores_total[
'objects_bbx_score'], \
scores_total['person_bbx'], \
scores_total['objects_bbx'], \
scores_total[
'class_id_objects']
temp_scores = extend(np.array(persons_score).reshape(len(persons_score), 1),
int(pairs_info[batch][1]))
persons_score_extended = np.concatenate([persons_score_extended, temp_scores])
temp_scores = extend(persons_np, int(pairs_info[batch][1]))
persons_np_extended = np.concatenate([persons_np_extended, temp_scores])
temp_scores = extend_object(np.array(objects_score).reshape(len(objects_score), 1),
int(pairs_info[batch][0]))
objects_score_extended = np.concatenate([objects_score_extended, temp_scores])
temp_scores = extend_object(objects_np, int(pairs_info[batch][0]))
objects_np_extended = np.concatenate([objects_np_extended, temp_scores])
temp_scores = extend_object(np.array(class_ids).reshape(len(class_ids), 1),
int(pairs_info[batch][0]))
class_ids_extended = np.concatenate([class_ids_extended, temp_scores])
class_ids_total.append(class_ids)
start_pers = start_pers + int(pairs_info[batch][0])
start_obj = start_obj + int(pairs_info[batch][1])
start_tot = start_tot + int(pairs_info[batch][1]) * int(pairs_info[batch][0])
###################################################################################################################
#### Applying LIS#######
persons_score_extended = LIS(persons_score_extended, 8.3, 12, 10)
objects_score_extended = LIS(objects_score_extended, 8.3, 12, 10)
##################################
predicted_HOI = predicted_HOI * predicted_HOI_combine * predicted_single * predicted_gem * objects_score_extended[
1:] * persons_score_extended[
1:]
index_mask = class_ids_extended[1:].reshape(len(class_ids_extended[1:]), ).astype('int32')
loss_mask, count_weight = mask_t[index_mask], count_t[index_mask]
predicted_HOI = loss_mask * predicted_HOI
#### Calculating Loss############
N_b = minbatch_size * no_of_classes # *int(total_elements[0])#*no_of_classes #pairs_info[1]*pairs_info[2]*pairs_info[3]
hum_obj_mask = torch.Tensor(
objects_score_extended[1:] * persons_score_extended[1:] * loss_mask).cuda()
lossf = torch.sum(loss_com_combine(
sigmoid(outputs) * sigmoid(outputs_combine) * sigmoid(outputs_single) * hum_obj_mask * sigmoid(
outputs_gem), labels.float())) / N_b
lossc = lossf.item()
acc_epoch += lossc
iteration += 1
if phase == 'train' or phase == 'val': #### Flowing the loss backwards#########
lossf.backward()
optimizer.step()
###########################################################
del lossf
del model_out
del inputs
del outputs
del labels
####### If we want to do Visualization#########
if visualize != 'f':
viss.visual(image_id, phase, pairs_info, predicted_HOI, predicted_single,
objects_score_extended[1:], persons_score_extended[1:], predicted_HOI_combine,
predicted_HOI_pair, true)
#####################################################################
##### Preparing for Storing Results##########
predicted_scores = np.concatenate((predicted_scores, predicted_HOI), axis=0)
true_scores = np.concatenate((true_scores, true), axis=0)
predicted_scores_single = np.concatenate((predicted_scores_single, predicted_single), axis=0)
true_scores_single = np.concatenate((true_scores_single, true_single), axis=0)
#############################################
#### Storing the result in V-COCO Format##########
if phase == 'test':
if (epoch + 1) % saving_epoch == 0 or infr == 't':
all_scores = filtering(predicted_HOI, true, persons_np_extended[1:], objects_np_extended[1:],
predicted_single, pairs_info, image_id, class_ids_extended[1:])
# prep.infer_format(image_id,all_scores,phase,detections_test,pairs_info)
proper.infer_format(image_id, all_scores, phase, detections_test, pairs_info)
######################################################
## Breaking in particular number of epoch####
if iteration == break_point + 1:
break;
#############################################
if phase == 'train':
loss_epoch_train.append((acc_epoch))
AP, AP_single = ap.class_AP(predicted_scores[1:, :], true_scores[1:, :], predicted_scores_single[1:, ],
true_scores_single[1:, ])
AP_train = pd.DataFrame(AP, columns=['Name_TRAIN', 'Score_TRAIN'])
AP_train_single = pd.DataFrame(AP_single, columns=['Name_TRAIN', 'Score_TRAIN'])
elif phase == 'test':
loss_epoch_test.append((acc_epoch))
AP, AP_single = ap.class_AP(predicted_scores[1:, :], true_scores[1:, :], predicted_scores_single[1:, ],
true_scores_single[1:, ])
                AP_test = pd.DataFrame(AP, columns=['Name_TEST', 'Score_TEST'])
print('Importing packages...')
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import seaborn as sns
import numpy as np
import matplotlib.dates as mdates
import datetime
#sns.set(color_codes=True)
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
import statistics as st
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
from statsmodels.distributions.empirical_distribution import ECDF
import scipy
import gc
column_list = ['scen_num', 'reopening_multiplier_4']
for ems_region in range(1,12):
column_list.append('hosp_det_EMS-' + str(ems_region))
column_list.append('hosp_det_cumul_EMS-' + str(ems_region))
column_list.append('detected_cumul_EMS-' + str(ems_region))
#Specify paths to trajectories. For this run, all trajectories were temporarily stored in the same folder.
print('Reading trajectories...')
sub1 = pd.read_csv('trajectoriesDat_1.csv', usecols=column_list) #0.08 - 0.09
print('Trajectory 1 read.')
sub2 = pd.read_csv('trajectoriesDat_2.csv', usecols=column_list) #0.10 - 0.115
print('Trajectory 2 read.')
sub3 = pd.read_csv('trajectoriesDat_3.csv', usecols=column_list)
"""Contains code for the state trajectory graph.
Adapted from [<NAME>'s work on blockconstruction](https://github.com/cogtoolslab/block_construction)
This has A LOT of hardcoded variables and most likely isn't going to work well with worlds that are not 8x8 or in the standard set."""
import os
import sys
import urllib, io
proj_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
utils_dir = os.path.join(proj_dir,'utils')
sys.path.append(utils_dir)
analysis_dir = os.path.join(proj_dir,'analysis')
analysis_utils_dir = os.path.join(analysis_dir,'utils')
sys.path.append(analysis_utils_dir)
import analysis_helper
if os.path.join(proj_dir,'stimuli') not in sys.path:
sys.path.append(os.path.join(proj_dir,'stimuli'))
import numpy as np
import scipy.stats as stats
import pandas as pd
from scipy.spatial import distance
from scipy import ndimage
from random import random
from sklearn.cluster import SpectralBiclustering
import sklearn.metrics as metrics
import itertools
import pymongo as pm
from collections import Counter
import json
import re
import ast
# import cv2
from PIL import Image, ImageOps, ImageDraw, ImageFont
from io import BytesIO
import base64
import matplotlib
from matplotlib import pylab, mlab, pyplot
from IPython.core.pylabtools import figsize, getfigs
plt = pyplot
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib import colors
import seaborn as sns
sns.set_context('talk')
sns.set_style('darkgrid')
from IPython.display import clear_output
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import blockworld_helpers as utils
import importlib
import scoring
from scipy.stats import entropy
import plotly.graph_objects as go
import plotly
import plotly.io as pio
def agentdf_to_dfic(df):
"""Takes in a dataframe generated by experiment_runner in projection_block_construction and returns a dataframe formatted for use with the trajectory graph"""
    dfic = pd.DataFrame()
from requests import Session
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import datetime
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '\
'AppleWebKit/537.36 (KHTML, like Gecko) '\
'Chrome/75.0.3770.80 Safari/537.36'}
def search_symbol(symbol):
"""
Search for symbol's link in Simply Wall Street
"""
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# JSON Key Field
url = f'https://api.simplywall.st/api/search/{symbol}'
# Request and transform response in json
screener = s.get(url)
json = screener.json()
if len(json) != 0:
# Stock URL
stock_url = json[0]['url']
else:
stock_url = 'not found'
return stock_url
def extract_all_urls(stocks_path='../docs/my_stocks.feather'):
"""
Create file with urls to call api
"""
# Read csv with stocks
my_stocks_df = pd.read_feather(stocks_path)
# Create List with stocks
my_stocks_list = list(my_stocks_df['symbol'].unique())
# Find all urls and store in a dataframe
results = []
for stock in my_stocks_list:
print(stock)
url = search_symbol(stock)
results.append([stock, url])
# Convert into a dataframe
results_df = pd.DataFrame(results, columns=['symbol', 'url'])
# Export to csv
results_df.to_csv('../docs/simplywallurls.csv', index=0)
return results_df
def symbol_data(stock_url):
"""
    Extract data from Simply Wall Street
"""
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# JSON Key Field
metrics_url = f'https://api.simplywall.st/api/company{stock_url}?include=info%2Cscore%2Cscore.snowflake%2Canalysis.extended.raw_data%2Canalysis.extended.raw_data.insider_transactions&version=2.0'
# Request and transform response in json
screener = s.get(metrics_url)
# check status
if screener.status_code == 200:
json = screener.json()
else:
json = 'not found'
return json
def extract_values(json_response, symbol):
"""
Extract important values from json_response for each symbol
"""
# Define important fields
fields_dictionary = {'total_assets': 'total_assets',
'total_current_assets': 'total_ca',
'cash_st_investments': 'cash_st_invest',
'total_receivables': 'total_receiv',
'inventory': 'inventory',
'net_property_plant_equip': 'nppe',
'short_term_debt': 'current_port_capital_leases',
'total_current_liabilities': 'total_cl',
'long_term_debt': 'lt_debt',
'total_liabilities': 'total_liabilities',
'total_equity': 'total_equity',
'accounts_payable': 'ap',
'total_revenue_ttm': 'total_rev',
'ebt_ttm':'ebt',
'ebitda_ttm': 'ebitda',
'ebit_ttm': 'ebit',
'pre_tax_income': 'earning_co',
'gross_profit_ttm': 'gross_profit',
'net_income_ttm': 'ni',
'g_a_expense_ttm': 'g_a_expense',
'income_tax_ttm': 'income_tax',
'interest_exp_ttm': 'interest_exp',
'basic_eps_ttm': 'basic_eps',
'net_oper_cf_ttm': 'cash_oper',
'net_investing_cf_ttm': 'cash_f_investing',
'net_financing_cf_ttm': 'cash_f_financing',
'levered_fcf_ttm': 'levered_fcf',
'capex_ttm': 'capex',
'beta_5yr': 'beta_5yr'}
# Check response code
if json_response != 'not found':
# Get to fields that really matter
assets = json_response['data']['analysis']['data']['extended']['data']['raw_data']['data']['past']
# check if there's data
if len(assets) > 0:
# Extract Available dates
dates = assets.keys()
# Create empty list to store results
results = []
# Create first row with headers
headers = []
headers.append('date')
headers.append('symbol')
[headers.append(row) for row in list(fields_dictionary.keys())]
results.append(headers)
# For each date in dates
for date in dates:
# Create Temporary list to append results for each date
temp_results = []
temp_results.append(date)
temp_results.append(symbol)
# See available keys - not all fields are available all the time
available_keys = assets[date].keys()
# For field in list of fields to pull
for field in fields_dictionary.values():
# if field is available
if field in available_keys:
# create value and append that
value = assets[date][field]['value']
temp_results.append(value)
# if field doesn't exist then append NaN
else:
temp_results.append(np.nan)
# Append to results
results.append(temp_results)
return results
else:
return 'not found'
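# Hypothetical end-to-end sketch (network-dependent; Simply Wall St's unofficial
# API may change at any time, and 'AAPL' is only an illustrative ticker):
# stock_url = search_symbol('AAPL')
# if stock_url != 'not found':
#     response_json = symbol_data(stock_url)
#     rows = extract_values(response_json, 'AAPL')  # rows[0] is the header row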
def extract_fundamentals(update_urls=False, urls_path='../docs/simplywallurls.csv'):
"""
Function to extract all fundamentals for all stocks
"""
# Check if we need to update list of urls
if update_urls == False:
# Read csv with stocks
urls_df = pd.read_csv(urls_path, header=0)
else:
urls_df = extract_all_urls()
# Create variable with total number of stocks so we can track progress
length = len(urls_df)
# create list to store results
results = []
# Loop through symbols
for index, row in urls_df.iterrows():
# Extract values
stock_url = row['url']
symbol = row['symbol']
# Print progress
print( str( round((((index + 1) / length) * 100), 2)) + '% Complete', symbol)
# If url is different than 'not found'
if row['url'] != 'not found':
# Extract json with values
stock_json_response = symbol_data(stock_url)
# Check if there's data
if stock_json_response != 'not found':
# Keep onlu relevant values
stock_numbers = extract_values(stock_json_response, symbol)
# Add that to results list
results.append(stock_numbers)
# Transform results into a dataframe, first create a list where every row is one record for each stock
to_df_list = [i for stock in results for i in stock]
# Convert it to a dataframe - dropping duplicates for headers (not the best solution)
df = pd.DataFrame(to_df_list, columns=to_df_list[0]).drop_duplicates()
# Remove first row with headers
df = df[1:]
# Export that
df.to_csv('../docs/my_stocks_fundamentals.csv', index=0)
return df
def update_fundamental_dates():
"""
Function to update fundamental data from Simply Wall Street
"""
# Import Fundamental Data and Earnings
df_fund = pd.read_csv('../docs/my_stocks_fundamentals.csv')
df_earnings = pd.read_csv('../docs/earnings.csv')
# Remove duplicates from df_earnings
df_earnings['earnings_date'] = pd.to_datetime(df_earnings['earnings_date']).dt.date
df_earnings = df_earnings.drop_duplicates(keep='first', subset=['symbol', 'earnings_date'])
# Create columns with previous Qs numbers
# First we need to define the relevant columns
relevant_columns = list(set(list(df_fund.columns)) - set(['date', 'symbol']))
relevant_columns = ['basic_eps_ttm', 'net_income_ttm', 'net_oper_cf_ttm', 'total_revenue_ttm']
# Loop through columns and create a new column with previous numbers
for column in relevant_columns:
for i in range(1,17):
number = i * -1
df_fund[f'{column}_{i}Q'] = df_fund.groupby('symbol')[column].shift(number)
# Now we need to pull data from earnings, because we need to tell exactly when all the data was available
# Transform dataframes
df_fund['date_str'] = df_fund['date'].astype(str).str[:-3]
df_fund['earnings_quarter'] = pd.to_datetime(df_fund['date_str'], unit='s')
# Figure out the correct dates in which earnings was released
df_earnings['key'] = 0
df_fund['key'] = 0
# Merge all together, looking at all possibilities
clean_df = pd.merge(df_earnings, df_fund, on=['symbol', 'key'])
clean_df['earnings_quarter'] = pd.to_datetime(clean_df['earnings_quarter']).dt.date
clean_df['earnings_date'] = pd.to_datetime(clean_df['earnings_date']).dt.date
clean_df['difference'] = (clean_df['earnings_date'] - clean_df['earnings_quarter']).dt.days
check = clean_df[(clean_df['difference'] >= 0)].groupby(['symbol','earnings_quarter']).min()['difference'].reset_index()
final = pd.merge(clean_df, check, on=['symbol', 'earnings_quarter', 'difference'])
# Drop columns
final.drop('date', axis=1, inplace=True)
    # Export to feather
final.to_feather('../docs/my_stock_fundamentals_correct_dates.feather')
return final
def update_prices(fundamentals, all_prices):
"""
Function to Update Prices using Fundamental Data
"""
# Convert date columns to datetime
    fundamentals['earnings_date'] = pd.to_datetime(fundamentals['earnings_date'])
import warnings
from typing import List, Optional, Dict, Tuple, TYPE_CHECKING
import pandas as pd
import numpy as np
from sklearn.decomposition import TruncatedSVD, NMF
import matplotlib.pyplot as plt
import seaborn as sns
from data_describe.config._config import get_option
from data_describe.text.text_preprocessing import (
create_doc_term_matrix,
create_tfidf_matrix,
filter_dictionary,
)
from data_describe.backends import _get_viz_backend
from data_describe.compat import _compat, _requires, _in_notebook
from data_describe._widget import BaseWidget
warnings.filterwarnings("ignore", category=UserWarning, module="gensim")
if TYPE_CHECKING:
gensim = _compat["gensim"]
def topic_model(
text_docs: List[str],
model_type: str = "LDA",
num_topics: Optional[int] = None,
min_topics: int = 2,
max_topics: int = 10,
no_below: int = 10,
no_above: float = 0.2,
tfidf: bool = True,
model_kwargs: Optional[Dict] = None,
):
"""Topic modeling.
Unsupervised methods of identifying topics in documents.
Args:
text_docs: A list of text documents in string format. These documents should
generally be pre-processed
model_type: {'LDA', 'LSA', 'LSI', 'SVD', 'NMF'}
Defines the type of model/algorithm which will be used.
num_topics: Sets the number of topics for the model. If None, will be optimized
using coherence values
min_topics: Starting number of topics to optimize for if number of topics not
provided. Default is 2
max_topics: Maximum number of topics to optimize for if number of topics not
provided. Default is 10
no_below: Minimum number of documents a word must appear in to be used in
training. Default is 10
no_above: Maximum proportion of documents a word may appear in to be used in
training. Default is 0.2
tfidf: If True, model created using TF-IDF matrix. Otherwise, document-term
matrix with wordcounts is used. Default is True
model_kwargs: Keyword arguments for the model, should be in agreement with
`model_type`
Returns:
Topic model widget.
"""
topicwidget = TopicModelWidget(model_type, num_topics, model_kwargs)
topicwidget.fit(
text_docs,
model_type,
min_topics,
max_topics,
no_below,
no_above,
tfidf,
model_kwargs,
)
return topicwidget
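# Hypothetical usage sketch: `docs` stands in for an already pre-processed
# corpus; the model type and topic count are arbitrary illustrative choices,
# not defaults recommended by this module.
# docs = ["economy market stocks trade", "team season players win game", ...]
# widget = topic_model(docs, model_type="NMF", num_topics=2)
# widget.show(num_topic_words=5)   # DataFrame of top keywords per topic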
@_requires("tqdm")
@_requires("gensim")
@_requires("pyLDAvis")
class TopicModelWidget(BaseWidget):
"""Create topic model widget."""
def __init__(
self,
model_type: str = "LDA",
num_topics: Optional[int] = None,
model_kwargs: Optional[Dict] = None,
):
"""Topic Modeling made for easier training and understanding of topics.
The exact model type, number of topics, and keyword arguments can be input
to initialize the object. The object can then be used to train the model
using the 'fit' function, and visualizations of the model can be displayed,
such as an interactive visual (for LDA models only), an elbow plot displaying
coherence values (for LDA or LSA/LSI models only), a DataFrame displaying the
top keywords per topic, and a DataFrame displaying the top documents per topic.
Args:
model_type: {'LDA', 'LSA', 'LSI', 'SVD', 'NMF'}
Defines the type of model/algorithm which will be used.
num_topics: Sets the number of topics for the model. If None, will be
optimized using coherence values (LDA or LSA/LSI) or becomes
3 (SVD/NMF). Default is None.
model_kwargs: Keyword arguments for the model, should be in agreement
with `model_type`.
Raises:
ValueError: Invalid `model_type`.
"""
self._model_type = model_type.upper()
if self._model_type not in ["LDA", "LSA", "LSI", "SVD", "NMF"]:
raise ValueError(
"Model type must be one of either: 'LDA', 'LSA', 'LSI', 'SVD' or 'NMF'"
)
self._num_topics = num_topics
self._model_kwargs = model_kwargs
def __str__(self):
return "data-describe Topic Model Widget"
@property
def model(self):
"""Trained topic model."""
return self._model
@property
def model_type(self):
"""Type of model which either already has been or will be trained."""
return self._model_type
@property
def num_topics(self):
"""The number of topics in the model."""
return self._num_topics
@property
def coherence_values(self):
"""A list of coherence values mapped from min_topics to max_topics."""
return self._coherence_values
@property
def dictionary(self):
"""A Gensim dictionary mapping the words from the documents to their token_ids."""
return self._dictionary
@property
def corpus(self):
"""Bag of Words (BoW) representation of documents (token_id, token_count)."""
return self._corpus
@property
def matrix(self):
"""Either TF-IDF or document-term matrix with documents as rows and words as columns."""
return self._matrix
@property
def min_topics(self):
"""If num_topics is None, this number is the first number of topics a model will be trained on."""
return self._min_topics
@property
def max_topics(self):
"""If num_topics is None, this number is the last number of topics a model will be trained on."""
return self._max_topics
def show(self, num_topic_words: int = 10, topic_names: Optional[List[str]] = None):
"""Displays most relevant terms for each topic.
Args:
num_topic_words: The number of words to be displayed for each topic. Default is 10
topic_names: A list of pre-defined names set for each of the topics. Default is None
Returns:
display_topics_df: Pandas DataFrame displaying topics as columns and their
relevant terms as rows. LDA/LSI models will display an extra column to
the right of each topic column, showing each term's corresponding
coefficient value
"""
return self.display_topic_keywords(
num_topic_words=num_topic_words, topic_names=topic_names
)
def _compute_lsa_svd_model(self, text_docs: List[str], tfidf: bool = True):
"""Trains LSA TruncatedSVD scikit-learn model.
Args:
text_docs: A list of text documents in string format. These documents should
generally be pre-processed.
tfidf: If True, model created using TF-IDF matrix. Otherwise, document-term
matrix with wordcounts is used. Default is True.
Returns:
lsa_model: Trained LSA topic model
"""
if not self._num_topics:
self._num_topics = 3
if tfidf:
self._matrix = create_tfidf_matrix(text_docs)
else:
self._matrix = create_doc_term_matrix(text_docs)
if not self._model_kwargs:
self._model_kwargs = {}
self._model_kwargs.update({"n_components": self._num_topics})
lsa_model = TruncatedSVD(**self._model_kwargs)
lsa_model.fit(self._matrix)
return lsa_model
def _compute_lsi_model(
self,
text_docs: List[str],
min_topics: int = 2,
max_topics: int = 10,
no_below: int = 10,
no_above: float = 0.2,
):
"""Trains LSA Gensim model.
Args:
text_docs: A list of text documents in string format. These documents should
generally be pre-processed
min_topics: Starting number of topics to optimize for if number of topics
not provided. Default is 2
max_topics: Maximum number of topics to optimize for if number of topics not
provided. Default is 10
no_below: Minimum number of documents a word must appear in to be used in
training. Default is 10
no_above: Maximum proportion of documents a word may appear in to be used in
training. Default is 0.2
Returns:
lsa_model: Trained LSA topic model
"""
tokenized_text_docs = [text_doc.split() for text_doc in text_docs]
self._min_topics = min_topics
self._max_topics = max_topics
self._dictionary, self._corpus = filter_dictionary(
tokenized_text_docs, no_below, no_above
)
lsa_model_list = []
if not self._model_kwargs:
self._model_kwargs = {}
self._model_kwargs.update(
{
"corpus": self._corpus,
"num_topics": self._num_topics,
"id2word": self._dictionary,
}
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
if self._num_topics is None:
self._coherence_values = []
pbar = _compat["tqdm"].tqdm( # type: ignore
range(self._min_topics, self._max_topics + 1),
desc="Fitting topic model",
)
for num in pbar:
self._model_kwargs.update({"num_topics": num})
lsa_model = _compat[ # type: ignore
"gensim"
].models.lsimodel.LsiModel(**self._model_kwargs)
coherence_model = _compat[ # type: ignore
"gensim"
].models.coherencemodel.CoherenceModel(
model=lsa_model,
texts=tokenized_text_docs,
dictionary=self._dictionary,
coherence="c_v",
)
score = coherence_model.get_coherence()
self._coherence_values.append(score)
lsa_model_list.append(lsa_model)
max_coherence_index = self._coherence_values.index(
max(self._coherence_values)
)
self._num_topics = len(lsa_model_list[max_coherence_index].get_topics())
return lsa_model_list[max_coherence_index]
else:
lsa_model = _compat["gensim"].models.lsimodel.LsiModel( # type: ignore
corpus=self._corpus,
id2word=self._dictionary,
num_topics=self._num_topics,
)
return lsa_model
def _compute_lda_model(
self,
text_docs: List[str],
min_topics: int = 2,
max_topics: int = 10,
no_below: int = 10,
no_above: float = 0.2,
):
"""Trains LDA Gensim model.
Args:
text_docs: A list of text documents in string format. These documents
should generally be pre-processed
min_topics: Starting number of topics to optimize for if number of topics
not provided. Default is 2
max_topics: Maximum number of topics to optimize for if number of topics
not provided. Default is 10
no_below: Minimum number of documents a word must appear in to be used in
training. Default is 10
no_above: Maximum proportion of documents a word may appear in to be used in
training. Default is 0.2
Returns:
lda_model (Gensim LdaModel): Trained LDA topic model
"""
tokenized_text_docs = [text_doc.split() for text_doc in text_docs]
self._min_topics = min_topics
self._max_topics = max_topics
self._dictionary, self._corpus = filter_dictionary(
tokenized_text_docs, no_below, no_above
)
lda_model_list = []
if not self._model_kwargs:
self._model_kwargs = {}
self._model_kwargs = {
**{
"random_state": 1,
"corpus": self._corpus,
"num_topics": self._num_topics,
"id2word": self._dictionary,
},
**self._model_kwargs,
}
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
if self._num_topics is None:
self._coherence_values = []
pbar = _compat["tqdm"].tqdm( # type: ignore
range(self._min_topics, self._max_topics + 1),
desc="Fitting topic model",
)
for num in pbar:
self._model_kwargs.update({"num_topics": num})
lda_model = _compat[ # type: ignore
"gensim"
].models.ldamodel.LdaModel(**self._model_kwargs)
coherence_model = _compat[ # type: ignore
"gensim"
].models.coherencemodel.CoherenceModel(
model=lda_model,
texts=tokenized_text_docs,
dictionary=self._dictionary,
coherence="c_v",
)
score = coherence_model.get_coherence()
self._coherence_values.append(score)
lda_model_list.append(lda_model)
max_coherence_index = self._coherence_values.index(
max(self._coherence_values)
)
self._num_topics = len(lda_model_list[max_coherence_index].get_topics())
return lda_model_list[max_coherence_index]
else:
lda_model = _compat["gensim"].models.ldamodel.LdaModel( # type: ignore
**self._model_kwargs
)
return lda_model
def _compute_nmf_model(self, text_docs: List[str], tfidf: bool = True):
"""Trains NMF scikit-learn model.
Args:
text_docs: A list of text documents in string format. These documents should
generally be pre-processed
tfidf: If True, model created using TF-IDF matrix. Otherwise, document-term
matrix with wordcounts is used. Default is True.
Returns:
lsa_model (scikit-learn NMF model): Trained NMF topic model
"""
if not self._num_topics:
self._num_topics = 3
if tfidf:
self._matrix = create_tfidf_matrix(text_docs)
else:
self._matrix = create_doc_term_matrix(text_docs)
if not self._model_kwargs:
self._model_kwargs = {}
self._model_kwargs.update({"n_components": self._num_topics})
nmf_model = NMF(**self._model_kwargs)
nmf_model.fit(self._matrix)
return nmf_model
def fit(
self,
text_docs: List[str],
model_type: Optional[str] = None,
min_topics: int = 2,
max_topics: int = 10,
no_below: int = 10,
no_above: float = 0.2,
tfidf: bool = True,
model_kwargs: Optional[Dict] = None,
):
"""Trains topic model and assigns model to object as attribute.
Args:
text_docs: A list of text documents in string format. These documents should
generally be pre-processed
model_type: {'LDA', 'LSA', 'LSI', 'SVD', 'NMF'}
Defines the type of model/algorithm which will be used.
min_topics: Starting number of topics to optimize for if number of topics
not provided. Default is 2
max_topics: Maximum number of topics to optimize for if number of topics not
provided. Default is 10
no_below: Minimum number of documents a word must appear in to be used in
training. Default is 10
no_above: Maximum proportion of documents a word may appear in to be used in
training. Default is 0.2
tfidf: If True, model created using TF-IDF matrix. Otherwise, document-term
matrix with wordcounts is used. Default is True.
model_kwargs: Keyword arguments for the model, should be in agreement with
`model_type`.
Raises:
ValueError: Invalid `model_type`.
"""
if model_kwargs is not None:
self._model_kwargs = model_kwargs
if model_type is not None:
self._model_type = model_type.upper()
if self._model_type not in ["LDA", "LSA", "LSI", "SVD", "NMF"]:
raise ValueError(
"Model type must be one of either: 'LDA', 'LSA', 'LSI', 'SVD' or 'NMF'"
)
if self._model_type == "LDA":
self._model = self._compute_lda_model(
text_docs, min_topics, max_topics, no_below, no_above
)
elif self._model_type == "LSA" or self._model_type == "LSI":
self._model = self._compute_lsi_model(
text_docs, min_topics, max_topics, no_below, no_above
)
elif self._model_type == "SVD":
self._model = self._compute_lsa_svd_model(text_docs, tfidf)
elif self._model_type == "NMF":
self._model = self._compute_nmf_model(text_docs, tfidf)
def elbow_plot(self, viz_backend: str = None):
"""Creates an elbow plot displaying coherence values vs number of topics.
Args:
viz_backend: The visualization backend.
Raises:
ValueError: No coherence values to plot.
Returns:
fig: Elbow plot showing coherence values vs number of topics
"""
try:
self._coherence_values
except AttributeError:
raise ValueError(
"Coherence values not defined. At least 2 LDA or LSI models need to be"
" trained with different numbers of topics."
)
else:
return _get_viz_backend(viz_backend).viz_elbow_plot(
self._min_topics, self._max_topics, self._coherence_values
)
def get_topic_nums(self):
"""Obtains topic distributions (LDA model) or scores (LSA/NMF model).
Returns:
doc_topics: Array of topic distributions (LDA model) or scores (LSA/NMF model)
"""
if self._model_type == "NMF" or self._model_type == "SVD":
return self._model.transform(self._matrix)
elif self._model_type == "LDA":
doc_topics = []
for doc in list(
self._model.get_document_topics(self._corpus, minimum_probability=0)
):
current_doc = [topic[1] for topic in doc]
doc_topics.append(current_doc)
return np.array(doc_topics)
elif self._model_type == "LSI" or self._model_type == "LSA":
doc_topics = []
for doc in self._model[self._corpus]:
current_doc = [topic[1] for topic in doc]
if current_doc:
doc_topics.append(current_doc)
else:
doc_topics.append([0] * len(self._model.get_topics()))
return np.array(doc_topics)
def display_topic_keywords(
self, num_topic_words: int = 10, topic_names: Optional[List[str]] = None
):
"""Creates Pandas DataFrame to display most relevant terms for each topic.
Args:
num_topic_words: The number of words to be displayed for each topic.
Default is 10
topic_names: A list of pre-defined names set for each of the topics.
Default is None
Returns:
display_topics_df: Pandas DataFrame displaying topics as columns and their
relevant terms as rows. LDA/LSI models will display an extra column to
the right of each topic column, showing each term's corresponding
coefficient value
"""
display_topics_dict = {}
if self._model_type == "NMF" or self._model_type == "SVD":
for topic_num, topic in enumerate(self._model.components_):
if not topic_names or not topic_names[topic_num]:
key = "Topic {}".format(topic_num + 1)
else:
key = "Topic: {}".format(topic_names[topic_num])
display_topics_dict[key] = [
self._matrix.columns[i]
for i in topic.argsort()[: -num_topic_words - 1 : -1]
]
elif (
self._model_type == "LSI"
or self._model_type == "LSA"
or self._model_type == "LDA"
):
for topic_num, topic in self._model.print_topics(num_words=num_topic_words):
topic_words = [
topic.split()[num].split("*")[1].replace('"', "")
for num in range(0, len(topic.split()), 2)
]
topic_coefficients = [
topic.split()[num].split("*")[0]
for num in range(0, len(topic.split()), 2)
]
if not topic_names or not topic_names[topic_num]:
key = "Topic {}".format(topic_num + 1)
coefficient_key = "Topic {} Coefficient Value".format(topic_num + 1)
else:
key = "Topic: {}".format(topic_names[topic_num])
coefficient_key = "Topic: {} - Coefficient Value".format(
topic_names[topic_num]
)
display_topics_dict[key], display_topics_dict[coefficient_key] = (
topic_words,
topic_coefficients,
)
term_numbers = ["Term {}".format(num + 1) for num in range(num_topic_words)]
        display_topics_df = pd.DataFrame(display_topics_dict, index=term_numbers)
        return display_topics_df
# --------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# code starts here
df = pd.read_csv(path)
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from scipy.stats import chisquare
def _chi2(bad_rates: List[Dict], overall_rate: float) -> float:
f_obs = [_bin["bad"] for _bin in bad_rates]
f_exp = [_bin["total"] * overall_rate for _bin in bad_rates]
chi2 = chisquare(f_obs=f_obs, f_exp=f_exp)[0]
return chi2
def _check_diff_woe(bad_rates: List[Dict], diff_woe_threshold: float) -> Union[None, int]:
woe_delta: np.ndarray = np.abs(np.diff([bad_rate["woe"] for bad_rate in bad_rates]))
min_diff_woe = min(sorted(list(set(woe_delta))))
if min_diff_woe < diff_woe_threshold:
return list(woe_delta).index(min_diff_woe)
else:
return None
def _mono_flags(bad_rates: List[Dict]) -> bool:
bad_rate_diffs = np.diff([bad_rate["bad_rate"] for bad_rate in bad_rates])
positive_mono_diff = np.all(bad_rate_diffs > 0)
negative_mono_diff = np.all(bad_rate_diffs < 0)
return True in [positive_mono_diff, negative_mono_diff]
def _find_index_of_diff_flag(bad_rates: List[Dict]) -> int:
bad_rate_diffs = np.diff([bad_rate["bad_rate"] for bad_rate in bad_rates])
    diff_flags = bad_rate_diffs > 0
    idx = list(diff_flags).index(
        pd.Series(diff_flags).value_counts().sort_values().index.tolist()[0]
    )
return idx
def _merge_bins_chi(x: np.ndarray, y: np.ndarray, bad_rates: List[Dict], bins: List):
idx = _find_index_of_diff_flag(bad_rates)
if idx == 0:
del bins[1]
elif idx == len(bad_rates) - 2:
del bins[len(bins) - 2]
else:
temp_bins = copy.deepcopy(bins)
del temp_bins[idx + 1]
temp_bad_rates, temp_overall_rate = bin_bad_rate(x, y, temp_bins)
chi_1 = _chi2(temp_bad_rates, temp_overall_rate)
del temp_bins
temp_bins = copy.deepcopy(bins)
del temp_bins[idx + 2]
temp_bad_rates, temp_overall_rate = bin_bad_rate(x, y, temp_bins)
chi_2 = _chi2(temp_bad_rates, temp_overall_rate)
if chi_1 < chi_2:
del bins[idx + 1]
else:
del bins[idx + 2]
bad_rates, _ = bin_bad_rate(x, y, bins)
return bad_rates, bins
def _merge_bins_min_pct(
x: np.ndarray, y: np.ndarray, bad_rates: List[Dict], bins: List, cat: bool = False
):
    pcts = [bad_rate["pct"] for bad_rate in bad_rates]
    idx = pcts.index(min(pcts))
if cat:
if idx == 0:
bins[idx + 1] += bins[idx]
elif idx == len(bad_rates) - 1:
bins[idx - 1] += bins[idx]
else:
if bad_rates[idx - 1]["pct"] < bad_rates[idx + 1]["pct"]:
bins[idx - 1] += bins[idx]
else:
bins[idx + 1] += bins[idx]
del bins[idx]
else:
if idx == 0:
del bins[1]
elif idx == len(bad_rates) - 1:
del bins[len(bins) - 2]
else:
if bad_rates[idx - 1]["pct"] < bad_rates[idx + 1]["pct"]:
del bins[idx]
else:
del bins[idx + 1]
bad_rates, _ = bin_bad_rate(x, y, bins, cat=cat)
if cat:
bins = [bad_rate["bin"] for bad_rate in bad_rates]
return bad_rates, bins
def bin_bad_rate(
x: np.ndarray, y: np.ndarray, bins: List, cat: bool = False
) -> tuple[List[Dict[str, Any]], Optional[float]]:
bad_rates = []
if cat:
max_idx = len(bins)
else:
max_idx = len(bins) - 1
for i in range(max_idx):
if cat:
value = bins[i]
else:
value = [bins[i], bins[i + 1]]
x_not_na = x[~pd.isna(x)]
y_not_na = y[~pd.isna(x)]
if cat:
x_in = x_not_na[pd.Series(x_not_na).isin(value)]
else:
x_in = x_not_na[
np.where((x_not_na >= np.min(value)) & (x_not_na < np.max(value)))
]
total = len(x_in)
        all_bad = y[~pd.isna(x)]
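# A minimal, self-contained sketch of the binning helpers above. The numbers are hypothetical
# and only illustrate the structure a `bad_rates` list is expected to have:
if __name__ == "__main__":
    example_bad_rates = [
        {"bad": 10, "total": 100, "bad_rate": 0.10, "pct": 0.25, "woe": -0.2},
        {"bad": 20, "total": 100, "bad_rate": 0.20, "pct": 0.25, "woe": 0.1},
        {"bad": 35, "total": 100, "bad_rate": 0.35, "pct": 0.50, "woe": 0.6},
    ]
    overall = sum(b["bad"] for b in example_bad_rates) / sum(b["total"] for b in example_bad_rates)
    print("chi2:", _chi2(example_bad_rates, overall))
    print("monotonic bad rates:", _mono_flags(example_bad_rates))
    print("closest woe pair below threshold:", _check_diff_woe(example_bad_rates, diff_woe_threshold=0.05))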
import os
import sys
import glob
import cc3d
import pandas as pd
import numpy as np
import itertools
sys.path.insert(0, os.path.abspath('../'))
from core.metrics import numpy_soft_dice_coeff
from core.generator import get_onehot_labelmap, get_unique_patient_finger_list
from contextlib import redirect_stdout
from cnn.convert import get_cc_map
def eval_settings(prediction_type):
if prediction_type == 'hnh':
labels = (0, 4)
hnh = True
rcnn = False
header = ["Healthy", "Non-healthy"]
elif prediction_type == 'fm':
labels = (0, 3)
hnh = False
rcnn = False
header = ["Background", "Erosion"]
elif prediction_type in ['tuber', 'resnet', 'vggnet']:
labels = (0, 2, 3,)
hnh = False
rcnn = True
header = ["Background", "Cyst", "Erosion"]
else:
labels = (0, 1, 2, 3)
hnh = False
rcnn = False
header = ["Background", "Bone", "Cyst", "Erosion"]
return labels, header, hnh, rcnn
def eval_dice(labels, truth, truth_onehot, prediction_onehot):
# Find dice_scores
dice = numpy_soft_dice_coeff(truth_onehot, prediction_onehot)
# Ensure that empty classes are not counted towards scores
ignored_classes = np.setxor1d(np.unique(truth), labels)
for label in ignored_classes:
if (len(labels) == 1) and (label in labels):
dice[1] = None
else:
dice[np.where(labels == label)] = None
return dice
def eval_quantification(quant_scores, y_true, y_pred, axis=(-3, -2, -1)):
# Calc positives
true_positive = np.sum(y_true * y_pred, axis=axis)
false_negative = np.sum(y_true, axis=axis) - true_positive
# Find inverse
y_true_inv = np.logical_not(y_true).astype(np.float64)
y_pred_inv = np.logical_not(y_pred).astype(np.float64)
# Calc negatives
true_negative = np.sum(y_true_inv * y_pred_inv, axis=axis)
false_positive = np.sum(y_true_inv, axis=axis) - true_negative
# Update quant_scores
quant_scores[0] += true_positive
quant_scores[1] += false_negative
quant_scores[2] += true_negative
quant_scores[3] += false_positive
def eval_detection(y_true, y_pred):
"""
    Build a list of all connected-component (CC) indexes.
    For every true CC, check whether any predicted CC shares indexes with it:
    TP if yes, FN if no.
    FP: for every predicted CC, check whether no true CC shares indexes with it.
"""
n_skips = detection_n_skips(y_true.shape[0])
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, DETECTION SCORE NOT EVALUATED.")
return 0, 0, 0
# For detection
d_true_positive = np.zeros(y_true.shape[0] - n_skips) # we do not test cc on background
d_false_negative = np.zeros(y_true.shape[0] - n_skips)
d_false_positive = np.zeros(y_true.shape[0] - n_skips)
# For detection segmentation
detect_dice_scores = [[] for _ in range(y_true.shape[0] - n_skips)]
# s_true_positive = np.zeros(y_true.shape[0] - n_skips)
# s_true_negative = np.zeros(y_true.shape[0] - n_skips)
# s_false_negative = np.zeros(y_true.shape[0] - n_skips)
# s_false_positive = np.zeros(y_true.shape[0] - n_skips)
for i in range(n_skips, y_true.shape[0]):
true_cc = get_cc_map(y_true[i])
pred_cc = get_cc_map(y_pred[i])
# find TP and FN
for tlabel, tcc in cc3d.each(true_cc, binary=True, in_place=True):
intersect = pred_cc[tcc > 0]
if np.count_nonzero(intersect):
d_true_positive[i - n_skips] += 1
## Find detected segmentation accuracy ##
intersecting_regions = np.zeros_like(tcc)
# Find all regions that overlaps with the truth
for plabel, pcc in cc3d.each(pred_cc, binary=True, in_place=True):
tmp_intersect = pcc[tcc > 0]
if np.count_nonzero(tmp_intersect):
intersecting_regions += pcc
# Calculate detected dice score
# print(np.count_nonzero(intersecting_regions))
tmp_quant_scores = [0, 0, 0, 0]
eval_quantification(tmp_quant_scores, tcc, intersecting_regions)
s_true_positive = tmp_quant_scores[0]
s_false_negative = tmp_quant_scores[1]
s_false_positive = tmp_quant_scores[3]
dice = (2 * s_true_positive) / (2 * s_true_positive + s_false_positive + s_false_negative)
detect_dice_scores[i - n_skips].append(dice)
else:
d_false_negative[i - n_skips] += 1
# find FP
for label, cc in cc3d.each(pred_cc, binary=True, in_place=True):
intersect = true_cc[cc > 0]
if np.count_nonzero(intersect) == 0:
d_false_positive[i - n_skips] += 1
# # Update detection_scores
# detection_scores[0] += true_positive
# detection_scores[1] += false_negative
# detection_scores[2] += false_positive
return d_true_positive, d_false_negative, d_false_positive, detect_dice_scores
def eval_reliability(detection_dict, subject_ids):
# Find number classes that have been detected:
n_classes = len(next(iter(detection_dict.values()))[0])
# Make list of fingers with more than one scan
unique_list = get_unique_patient_finger_list(None, subject_ids)
consecutive_list = [x for x in unique_list if len(x) > 1]
# Find erosion count increase for every pair
increase_list = list() # [0] = first, [1] = second, [2] = increase
for finger_scans in consecutive_list:
for i in range(1, len(finger_scans)):
first_subject = subject_ids[finger_scans[i - 1]]
second_subject = subject_ids[finger_scans[i]]
increment_tp = detection_dict[first_subject][0] - detection_dict[second_subject][0]
increment_fp = detection_dict[first_subject][1] - detection_dict[second_subject][1]
increment_tot = increment_tp # + increment_fp
increase_list.append([first_subject, second_subject, increment_tot])
# Sort in positive and negative
increase_list = np.array(increase_list)
zero_or_positive = list()
negative = list()
n_positive = list()
n_negative = list()
var_positive = list()
var_negative = list()
for i in range(n_classes):
zero_or_positive.append(increase_list[np.stack(increase_list[:, 2])[:, i] >= 0])
negative.append(increase_list[np.stack(increase_list[:, 2])[:, i] < 0])
# Count N_positive and N_negative
n_positive.append(len(zero_or_positive[i]))
n_negative.append(len(negative[i]))
# Compute variance of N_positive and N_negative
try:
var_positive.append(np.var(np.stack(zero_or_positive[i][:, 2])[:, i]))
except IndexError:
var_positive.append(0.0)
try:
var_negative.append(np.var(np.stack(negative[i][:, 2])[:, i]))
except IndexError:
var_negative.append(0.0)
return (n_positive, n_negative, var_positive, var_negative)
def detection_n_skips(n_labels):
if n_labels == 4: # skip background and bone
n_skips = 2
elif n_labels == 3: # skip background
n_skips = 1
elif n_labels == 2: # skip background
n_skips = 1
else:
n_skips = -1
return n_skips
def save_dice(dice_coeffs, header, subject_ids, output_path, prediction_type):
# Expand header
new_header = header.copy()
for i in range(len(header)):
new_header[i] = 'DICE_' + new_header[i]
# Save dice coefficients
    dice_dataframe = pd.DataFrame.from_records(dice_coeffs, columns=new_header, index=subject_ids)
dice_score_path = os.path.join(output_path, prediction_type + "_dice_scores.csv")
dice_dataframe.to_csv(dice_score_path, float_format='%.4f', na_rep='nan')
# Save dice summary
summary_path = os.path.join(output_path, prediction_type + '_dice_summary.txt')
pd.options.display.float_format = '{:,.4f}'.format
with open(summary_path, 'w') as f:
with redirect_stdout(f):
# Print out median and max values of all classes.
print("Ncols: {}".format(dice_dataframe.shape[0]))
print("Max scores:")
max_score = dice_dataframe.max(axis=0).rename('max_score')
max_index = dice_dataframe.idxmax(axis=0).rename('max_index')
print(pd.concat([max_score, max_index], axis=1))
print()
print("Median scores:")
median_score = dice_dataframe.median(axis=0).rename('median_score')
print(median_score)
print()
print("Average of individual scores:")
average_score = dice_dataframe.mean(axis=0).rename('average_score')
print(average_score)
print()
print("Count of non-NAN values:")
print(dice_dataframe.count())
print()
print("Count of non-zero values:")
print(dice_dataframe.fillna(0).astype(bool).sum(axis=0))
def save_detection(detection_scores, header, output_path, prediction_type):
n_skips = detection_n_skips(len(header))
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, DETECTION SCORE NOT EVALUATED.")
return detection_scores
# Change to names we understand
true_positive = detection_scores[0]
false_negative = detection_scores[1]
false_positive = detection_scores[2]
# Calculate stats
sensitivity = true_positive / (true_positive + false_negative)
ppv = true_positive / (true_positive + false_positive)
# Expand header
n_stats = 5
new_header = [''] * (len(header) - n_skips) * n_stats # we do not save stats for background
for i in range(len(header) - n_skips):
new_header[i + 0 * (len(header) - n_skips)] = 'TP_' + header[i + n_skips]
new_header[i + 1 * (len(header) - n_skips)] = 'FN_' + header[i + n_skips]
new_header[i + 2 * (len(header) - n_skips)] = 'FP_' + header[i + n_skips]
new_header[i + 3 * (len(header) - n_skips)] = 'TPR_' + header[i + n_skips]
new_header[i + 4 * (len(header) - n_skips)] = 'PPV_' + header[i + n_skips]
# Save values
dataframe = pd.DataFrame.from_records(
np.concatenate([true_positive, false_negative, false_positive, sensitivity, ppv]).reshape([len(new_header), 1]),
index=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_detection_scores.csv")
dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan', header=False)
def save_quantification(quant_scores, header, output_path, prediction_type):
# Change to names we understand
true_positive = quant_scores[0]
false_negative = quant_scores[1]
true_negative = quant_scores[2]
false_positive = quant_scores[3]
# Calculate stats
sensitivity = true_positive / (true_positive + false_negative)
specificity = true_negative / (true_negative + false_positive)
ppv = true_positive / (true_positive + false_positive)
dice = (2 * true_positive) / (2 * true_positive + false_positive + false_negative)
# Expand header
n_stats = 4
new_header = [''] * len(header) * n_stats
for i in range(len(header)):
new_header[i] = 'TPR_' + header[i]
new_header[i + 1 * len(header)] = 'TNR_' + header[i]
new_header[i + 2 * len(header)] = 'PPV_' + header[i]
new_header[i + 3 * len(header)] = 'DICE_' + header[i]
# Save values
dataframe = pd.DataFrame.from_records(
np.concatenate([sensitivity, specificity, ppv, dice]).reshape([len(new_header), 1]),
index=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_quantification_scores.csv")
dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan', header=False)
def save_reliability(reliability_stats, header, output_path, prediction_type):
n_skips = detection_n_skips(len(header))
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, RELIABILITY SCORE NOT EVALUATED.")
return reliability_stats
# Change to names we understand
n_positive = np.array(reliability_stats[0])
n_negative = np.array(reliability_stats[1])
var_positive = np.array(reliability_stats[2])
var_negative = np.array(reliability_stats[3])
# Calculate stats
pos_ratio = n_positive / (n_positive + n_negative)
# Expand header
n_stats = 5
new_header = [''] * (len(header) - n_skips) * n_stats # we do not save stats for background
for i in range(len(header) - n_skips):
new_header[i + 0 * (len(header) - n_skips)] = 'n_positive' + '_' + header[i + n_skips]
new_header[i + 1 * (len(header) - n_skips)] = 'n_negative' + '_' + header[i + n_skips]
new_header[i + 2 * (len(header) - n_skips)] = 'var_positive' + '_' + header[i + n_skips]
new_header[i + 3 * (len(header) - n_skips)] = 'var_negative' + '_' + header[i + n_skips]
new_header[i + 4 * (len(header) - n_skips)] = 'pos_ratio' + '_' + header[i + n_skips]
# Save values
dataframe = pd.DataFrame.from_records(
np.concatenate([n_positive, n_negative, var_positive, var_negative, pos_ratio]).reshape([len(new_header), 1]),
index=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_reliability_scores.csv")
dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan', header=False)
def save_detected_seg(detected_dice_scores, header, output_path, prediction_type):
n_skips = detection_n_skips(len(header))
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, DETECTION SCORE NOT EVALUATED.")
return detected_dice_scores
new_header = header.copy()
new_header = new_header[n_skips:]
for i in range(len(new_header)):
new_header[i] = 'DICE_' + new_header[i]
# Save dice coefficients
zip_tuple = (_ for _ in itertools.zip_longest(*detected_dice_scores))
dice_dataframe = pd.DataFrame.from_records(zip_tuple, columns=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_dq_scores.csv")
dice_dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan')
# Save dice summary
summary_path = os.path.join(output_path, prediction_type + '_dq_summary.txt')
pd.options.display.float_format = '{:,.4f}'.format
with open(summary_path, 'w') as f:
with redirect_stdout(f):
# Print out median and max values of all classes.
print("Ncols: {}".format(dice_dataframe.shape[0]))
print("Max scores:")
max_score = dice_dataframe.max(axis=0).rename('max_score')
max_index = dice_dataframe.idxmax(axis=0).rename('max_index')
print(pd.concat([max_score, max_index], axis=1))
print()
print("Min scores:")
min_score = dice_dataframe.min(axis=0).rename('min_score')
min_index = dice_dataframe.idxmin(axis=0).rename('min_index')
            print(pd.concat([min_score, min_index], axis=1))
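# A minimal sketch of the quantification bookkeeping above, run on synthetic one-hot volumes.
# Shapes and values are hypothetical and chosen only for illustration (assumes this module's
# imports resolve):
if __name__ == "__main__":
    quant_scores = [0.0, 0.0, 0.0, 0.0]  # running TP, FN, TN, FP per class
    y_true = np.zeros((2, 4, 4, 4))
    y_true[0, :2] = 1   # class 0 fills one half of the volume
    y_true[1, 2:] = 1   # class 1 fills the other half
    y_pred = np.zeros((2, 4, 4, 4))
    y_pred[0, :2] = 1   # class 0 predicted perfectly
    y_pred[1, 1:] = 1   # class 1 over-segmented by one slab
    eval_quantification(quant_scores, y_true, y_pred)
    tp, fn, tn, fp = quant_scores
    print("per-class dice:", (2 * tp) / (2 * tp + fp + fn))
    print("channels skipped for 4 labels:", detection_n_skips(4))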
import argparse
from glob import glob
import pandas as pd
from pathlib import Path
from tqdm import tqdm
# internal
from utils import xml_utils
from utils.constants import *
from models.deep_tree_model import get_model
class DeepTreePredictor:
"""
Given a model, predicts on images.
Model is currently expected to be from 'deepforest' lib, so current class is a wrapper.
Apart from wrapping the 'deepforest' predictor, DeepTreePredictor implements some utility methods
used for fast predict on folder with images.
Outputting the xml/image is useful for tagging afterwards via LabelImg.
Outputting the csv is useful for fast handling and read/write via pandas.
The output will be in the same given folder, right near each image,
since the output will be in the same name as the image.
e.g. folder_X/img1.png folder_X/img2.png
after calling: folder_X/img1.png folder_X/img1.csv folder_X/img1.xml
folder_X/img2.png folder_X/img2.csv folder_X/img2.xml
Outputting the csv or the xml is optional. Apart from these outputs, also a csv for the whole
images is generated under ALL_PREDICTIONS_CSV.
"""
def __init__(self, model) -> None:
self.model = model
def predict_image(self, image_path):
bboxes = self.model.predict_image(path=image_path)
return bboxes
def predict_on_folder(self, folder_path, write_csv=False, write_xml=False):
"""
Predicts on all images in folder
Args:
            folder_path: path of the folder to run on.
            write_csv: Bool, whether to write a csv next to each image, holding a
                DataFrame with format <detection bbox, label, confidence>
write_xml: Writes the xml at the same path as the image it describes
Returns:
DataFrame with format <image_path, detection bbox, label, confidence>
"""
images_paths = sorted(glob(f"{str(folder_path)}/*.png"))
accumulator_bboxes_dfs = []
for img_path_str in tqdm(images_paths, desc="Predicting on images"):
img_path = Path(img_path_str)
bboxes_df = self.predict_image(img_path_str)
bboxes_df = bboxes_df if bboxes_df is not None else pd.DataFrame()
if write_xml:
xml_utils.annotations_to_xml(bboxes_df, img_path_str, write_file=True)
if write_csv:
bboxes_df.to_csv(img_path.parent / f'{img_path.stem}.csv', index=False)
bboxes_df[IMAGE_PATH] = img_path.name
accumulator_bboxes_dfs.append(bboxes_df)
        folder_bboxes_df = pd.concat(accumulator_bboxes_dfs)
        return folder_bboxes_df
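# A minimal usage sketch. The folder path is hypothetical and the call to get_model() assumes a
# no-argument signature, which may differ in the real models.deep_tree_model module;
# predict_on_folder is used exactly as defined above:
if __name__ == "__main__":
    model = get_model()  # hypothetical call; pass whatever arguments your deep_tree_model expects
    predictor = DeepTreePredictor(model)
    predictions_df = predictor.predict_on_folder(
        Path("data/example_tiles"),  # hypothetical folder containing *.png tiles
        write_csv=True,
        write_xml=True,
    )
    print(predictions_df.head())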