prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"values, dtype",
[
([1, 2, 3], "int64"),
([1.0, 2.0, 3.0], "float64"),
(["a", "b", "c"], "object"),
(["a", "b", "c"], "string"),
([1, 2, 3], "datetime64[ns]"),
([1, 2, 3], "datetime64[ns, CET]"),
([1, 2, 3], "timedelta64[ns]"),
(["2000", "2001", "2002"], "Period[D]"),
([1, 0, 3], "Sparse"),
([pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(3, 4)], "interval"),
],
)
@pytest.mark.parametrize(
"mask", [[True, False, False], [True, True, True], [False, False, False]]
)
@pytest.mark.parametrize("box_mask", [True, False])
@pytest.mark.parametrize("frame", [True, False])
def test_series_mask_boolean(values, dtype, mask, box_mask, frame):
ser = pd.Series(values, dtype=dtype, index=["a", "b", "c"])
if frame:
ser = ser.to_frame()
mask = pd.array(mask, dtype="boolean")
if box_mask:
mask = | pd.Series(mask, index=ser.index) | pandas.Series |
import numpy as np
import pandas as pd; pd.options.mode.chained_assignment = None
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.optimize import minimize
from scipy.optimize import least_squares
import os
def is_const(x):
if np.linalg.norm(x - np.mean(x)) < 1e-13 * np.abs(np.mean(x)):
return True
elif np.all( x==x[0]):
return True
else:
return False
def calc_eval_metrics(y, y_hat, y_hat_map=None, d=None, ci=None):
'''
    Calculate Pearson's correlation and RMSE, plus their mapped variants when a mapped prediction (y_hat_map) is provided.
'''
r = {
'r_p': np.nan,
'r_p_map': np.nan,
'rmse': np.nan,
'rmse_map': np.nan,
'rmse_star_map': np.nan,
}
if is_const(y_hat):
r['r_p'] = np.nan
else:
r['r_p'] = pearsonr(y, y_hat)[0]
r['rmse'] = calc_rmse(y, y_hat)
if y_hat_map is not None:
r['rmse_map'] = calc_rmse(y, y_hat_map, d=d)
r['r_p_map'] = pearsonr(y, y_hat_map)[0]
if ci is not None:
r['rmse_star_map'] = calc_rmse_star(y, y_hat_map, ci, d)[0]
return r
def calc_rmse(y_true, y_pred, d=0):
    if d == 0:
rmse = np.sqrt(np.mean(np.square(y_true - y_pred)))
else:
N = y_true.shape[0]
if (N - d) < 1:
rmse = np.nan
else:
rmse = np.sqrt(1 / (N - d) * np.sum(np.square(y_true - y_pred))) # Eq (7-29) P.1401
return rmse
def calc_rmse_star(mos_sub, mos_obj, ci, d):
N = mos_sub.shape[0]
error = mos_sub - mos_obj
if ci[0] == -1:
p_error = np.nan
rmse_star = np.nan
else:
p_error = (abs(error) - ci).clip(min=0) # Eq (7-27) P.1401
if (N - d) < 1:
rmse_star = np.nan
else:
            rmse_star = np.sqrt(1 / (N - d) * np.sum(p_error ** 2))  # Eq (7-29) P.1401
return rmse_star, p_error, error
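# Hedged worked example (added by the editor, not part of the original sample):
# the epsilon-insensitive RMSE* of ITU-T P.1401 ignores errors that stay inside
# the per-condition confidence interval. All numbers below are fabricated.
def _demo_rmse_star():
    mos_sub = np.array([3.2, 4.1, 2.5, 3.8, 1.9, 4.6])   # subjective MOS
    mos_obj = np.array([3.0, 4.4, 2.9, 3.6, 2.3, 4.4])   # mapped objective scores
    ci = np.full(6, 0.25)                                 # 95% confidence intervals
    d = 4                                                 # 3rd-order mapping has 4 coefficients
    rmse = calc_rmse(mos_sub, mos_obj, d=d)
    rmse_star, p_error, error = calc_rmse_star(mos_sub, mos_obj, ci, d)
    return rmse, rmse_star    # rmse_star <= rmse because within-CI errors are clipped to 0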
def calc_mapped(x, b):
N = x.shape[0]
order = b.shape[0] - 1
A = np.zeros([N, order + 1])
for i in range(order + 1):
A[:, i] = x ** (i)
return A @ b
def fit_first_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
return b
def fit_second_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat, y_con_hat ** 2]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
return b
def fit_third_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat, y_con_hat ** 2, y_con_hat ** 3]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
p = np.poly1d(np.flipud(b))
p2 = np.polyder(p)
rr = np.roots(p2)
r = rr[np.imag(rr) == 0]
monotonic = all(np.logical_or(r > max(y_con_hat), r < min(y_con_hat)))
    if not monotonic:
        print('Not monotonic!!!')
return b
def fit_monotonic_third_order(
dfile_db,
dcon_db=None,
pred=None,
target_mos=None,
target_ci=None,
mapping=None):
y = dfile_db[target_mos].to_numpy()
y_hat = dfile_db[pred].to_numpy()
if dcon_db is None:
if target_ci in dfile_db:
ci = dfile_db[target_ci].to_numpy()
else:
ci = 0
else:
y_con = dcon_db[target_mos].to_numpy()
if target_ci in dcon_db:
ci = dcon_db[target_ci].to_numpy()
else:
ci = 0
x = y_hat
y_hat_min = min(y_hat) - 0.01
y_hat_max = max(y_hat) + 0.01
def polynomial(p, x):
return p[0] + p[1] * x + p[2] * x ** 2 + p[3] * x ** 3
def constraint_2nd_der(p):
return 2 * p[2] + 6 * p[3] * x
def constraint_1st_der(p):
x = np.arange(y_hat_min, y_hat_max, 0.1)
return p[1] + 2 * p[2] * x + 3 * p[3] * x ** 2
def objective_con(p):
x_map = polynomial(p, x)
dfile_db['x_map'] = x_map
x_map_con = dfile_db.groupby('con').mean().x_map.to_numpy()
err = x_map_con - y_con
if mapping == 'pError':
p_err = (abs(err) - ci).clip(min=0)
return (p_err ** 2).sum()
elif mapping == 'error':
return (err ** 2).sum()
else:
raise NotImplementedError
def objective_file(p):
x_map = polynomial(p, x)
err = x_map - y
if mapping == 'pError':
p_err = (abs(err) - ci).clip(min=0)
return (p_err ** 2).sum()
elif mapping == 'error':
return (err ** 2).sum()
else:
raise NotImplementedError
cons = dict(type='ineq', fun=constraint_1st_der)
if dcon_db is None:
res = minimize(
objective_file,
x0=np.array([0., 1., 0., 0.]),
method='SLSQP',
constraints=cons,
)
else:
res = minimize(
objective_con,
x0=np.array([0., 1., 0., 0.]),
method='SLSQP',
constraints=cons,
)
b = res.x
return b
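# Hedged sketch (added by the editor, not part of the original sample): the same
# SLSQP idea on fabricated data - fit p0 + p1*x + p2*x**2 + p3*x**3 while keeping
# the first derivative non-negative over the observed prediction range.
def _demo_monotonic_fit():
    rng = np.random.default_rng(0)
    x = np.sort(rng.uniform(1.0, 5.0, 50))
    y = 1.0 + 0.8 * x + rng.normal(0.0, 0.1, 50)

    def first_der(p):
        grid = np.arange(x.min() - 0.01, x.max() + 0.01, 0.1)
        return p[1] + 2 * p[2] * grid + 3 * p[3] * grid ** 2   # must stay >= 0

    res = minimize(
        lambda p: np.sum((p[0] + p[1] * x + p[2] * x ** 2 + p[3] * x ** 3 - y) ** 2),
        x0=np.array([0., 1., 0., 0.]),
        method='SLSQP',
        constraints=dict(type='ineq', fun=first_der),
    )
    return res.x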
def calc_mapping(
dfile_db,
mapping=None,
dcon_db=None,
target_mos=None,
target_ci=None,
pred=None,
):
if dcon_db is not None:
y = dcon_db[target_mos].to_numpy()
y_hat = dfile_db.groupby('con').mean().get(pred).to_numpy()
else:
y = dfile_db[target_mos].to_numpy()
y_hat = dfile_db[pred].to_numpy()
    if mapping is None:
b = np.array([0, 1, 0, 0])
d_map = 0
elif mapping == 'first_order':
b = fit_first_order(y, y_hat)
d_map = 1
elif mapping == 'second_order':
b = fit_second_order(y, y_hat)
d_map = 3
elif mapping == 'third_order':
b = fit_third_order(y, y_hat)
d_map = 4
elif mapping == 'monotonic_third_order':
b = fit_monotonic_third_order(
dfile_db,
dcon_db=dcon_db,
pred=pred,
target_mos=target_mos,
target_ci=target_ci,
mapping='error',
)
d_map = 4
else:
raise NotImplementedError
return b, d_map
def eval_results(
df,
target_mos='mos',
target_ci='mos_ci',
pred='mos_pred',
mapping=None,
do_print=False
):
'''
    Evaluates predictions against the subjective target for each database and overall, optionally applying a polynomial mapping first.
'''
# Loop through databases
db_results_df = []
df['y_hat_map'] = np.nan
# s = df.db.astype("category").cat.categories
for db_name in df.db.astype("category").cat.categories:
df_db = df.loc[df.db == db_name]
# per file -----------------------------------------------------------
y = df_db[target_mos].to_numpy()
if np.isnan(y).any():
r = {'r_p': np.nan, 'r_s': np.nan, 'rmse': np.nan, 'r_p_map': np.nan,
'r_s_map': np.nan, 'rmse_map': np.nan}
else:
y_hat = df_db[pred].to_numpy()
b, d = calc_mapping(
df_db,
mapping=mapping,
target_mos=target_mos,
target_ci=target_ci,
pred=pred
)
y_hat_map = calc_mapped(y_hat, b)
r = calc_eval_metrics(y, y_hat, y_hat_map=y_hat_map, d=d)
r.pop('rmse_star_map')
r = {f'{k}_file': v for k, v in r.items()}
if do_print and (not np.isnan(y).any()):
print('%-30s r_p_file: %0.2f, rmse_file: %0.2f, rmse_map_file: %0.2f'
% (db_name + ':', r['r_p_file'],r['rmse_file'], r['rmse_map_file']))
db_results_df.append({'db': db_name, **r})
# Save individual database results in DataFrame
db_results_df = pd.DataFrame(db_results_df)
r_average = {}
r_average['r_p_mean_file'] = db_results_df.r_p_file.mean()
r_average['rmse_mean_file'] = db_results_df.rmse_file.mean()
r_average['rmse_map_mean_file'] = db_results_df.rmse_map_file.mean()
y = df[target_mos].to_numpy()
y_hat = df[pred].to_numpy()
r_total_file = calc_eval_metrics(y, y_hat)
r_total_file = {'r_p_all': r_total_file['r_p'], 'rmse_all': r_total_file['rmse']}
overall_results = {
**r_total_file,
**r_average
}
return db_results_df, overall_results
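# Hedged usage sketch (added by the editor): eval_results expects one row per
# file with a 'db' column, the subjective target and the prediction. The tiny
# frame below is fabricated only to illustrate the call signature.
def _demo_eval_results():
    demo = pd.DataFrame({
        'db': ['dbA'] * 4 + ['dbB'] * 4,
        'mos': [1.5, 2.5, 3.5, 4.5, 2.0, 3.0, 4.0, 4.5],
        'mos_pred': [1.7, 2.4, 3.2, 4.6, 2.2, 2.9, 4.1, 4.3],
    })
    db_results, overall = eval_results(demo, target_mos='mos', pred='mos_pred',
                                       mapping='first_order', do_print=True)
    return db_results, overall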
def evaluate_mos(csv_mos,csv_mos_pre):
df = pd.read_csv(csv_mos)
df2 = pd.read_csv(csv_mos_pre)
    dbs = []
    degs = []
    moss = []
    pre_moss = []
    pre_score_dict = {}
    for i in range(len(df2)):
        pre_score_dict[df2.iloc[i]['deg_wav']] = df2.iloc[i]['predict_mos']
    for i in range(len(df)):
        dbs.append(df.iloc[i]['db'])
        degs.append(df.iloc[i]['deg_wav'])
        moss.append(df.iloc[i]['mos'])
        pre_moss.append(pre_score_dict[df.iloc[i]['deg_wav']])
    df_merge = pd.DataFrame({'db': dbs, 'deg_wav': degs, 'mos': moss, 'mos_pred': pre_moss})
db_results_df, overall_results = eval_results(
df_merge,
target_mos='mos',
target_ci= 'mos_ci',
pred='mos_pred',
mapping = 'monotonic_third_order',
do_print=True)
print('r_p_mean_file {:0.4f} rmse_mean_file {:0.4f} rmse_map_mean_file {:0.4f} '
.format(overall_results['r_p_mean_file'], overall_results['rmse_mean_file'], overall_results['rmse_map_mean_file'])
)
return db_results_df, overall_results
def evaluate_mos_dir(dir,csv):
    teams = []
    teams_plcc = []
    teams_rmse = []
    teams_rmse_map = []
    MSFT_PSTN_Test_plcc = []
    MSFT_PSTN_Test_rmse = []
    MSFT_PSTN_Test_rmse_map = []
    TUB_IS22_DB1_plcc = []
    TUB_IS22_DB1_rmse = []
    TUB_IS22_DB1_rmse_map = []
    TencentCorupsVal_plcc = []
    TencentCorupsVal_rmse = []
    TencentCorupsVal_rmse_map = []
for path,subpath,files in os.walk(dir):
for file in files:
if file.endswith('.csv'):
fullpath = os.path.join(path,file)
assert os.path.exists(fullpath)
print('results of team ' +fullpath+':')
# sym = check_audio_order(csv,fullpath)
# if not sym:
# print(fullpath)
db_results_df, overall_results= evaluate_mos(csv,fullpath)
teams.append(fullpath.split('\\')[-2])
teams_plcc.append(overall_results['r_p_mean_file'])
teams_rmse.append(overall_results['rmse_mean_file'])
teams_rmse_map.append(overall_results['rmse_map_mean_file'])
s = db_results_df['r_p_file']
MSFT_PSTN_Test_plcc.append(db_results_df['r_p_file'][0])
MSFT_PSTN_Test_rmse.append(db_results_df['rmse_file'][0])
MSFT_PSTN_Test_rmse_map.append(db_results_df['rmse_map_file'][0])
TUB_IS22_DB1_plcc.append(db_results_df['r_p_file'][1])
TUB_IS22_DB1_rmse.append(db_results_df['rmse_file'][1])
TUB_IS22_DB1_rmse_map.append(db_results_df['rmse_map_file'][1])
TencentCorupsVal_plcc.append(db_results_df['r_p_file'][2])
TencentCorupsVal_rmse.append(db_results_df['rmse_file'][2])
TencentCorupsVal_rmse_map.append(db_results_df['rmse_map_file'][2])
    df = pd.DataFrame({
        'team_name': teams,
        'plcc_all_mean': teams_plcc,
        'rmse_all_mean': teams_rmse,
        'rmse_map_all_mean': teams_rmse_map,
        'MSFT_PSTN_Test_plcc': MSFT_PSTN_Test_plcc,
        'MSFT_PSTN_Test_rmse': MSFT_PSTN_Test_rmse,
        'MSFT_PSTN_Test_rmse_map': MSFT_PSTN_Test_rmse_map,
        'TUB_IS22_DB1_plcc': TUB_IS22_DB1_plcc,
        'TUB_IS22_DB1_rmse': TUB_IS22_DB1_rmse,
        'TUB_IS22_DB1_rmse_map': TUB_IS22_DB1_rmse_map,
        'TencentCorupsVal_plcc': TencentCorupsVal_plcc,
        'TencentCorupsVal_rmse': TencentCorupsVal_rmse,
        'TencentCorupsVal_rmse_map': TencentCorupsVal_rmse_map})
df.to_csv(os.path.join(dir,'result.csv'),index=False,encoding="utf-8-sig")
def check_audio_order(csv1,csv2):
df1 = pd.read_csv(csv1)
df2 = | pd.read_csv(csv2) | pandas.read_csv |
# Copyright (c) 2019, MD2K Center of Excellence
# - <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pandas as pd
from geopy.distance import great_circle
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.group import GroupedData
from pyspark.sql.types import StructField, StructType, DoubleType, IntegerType
from scipy.spatial import ConvexHull
from shapely.geometry.multipoint import MultiPoint
from sklearn.cluster import DBSCAN
from cerebralcortex.algorithms.utils.mprov_helper import CC_MProvAgg
from cerebralcortex.algorithms.utils.util import update_metadata
from cerebralcortex.core.datatypes import DataStream
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata
def impute_gps_data(ds, accuracy_threashold:int=100):
"""
    Impute GPS data: coordinates whose reported accuracy exceeds the threshold
    are set to missing and forward-filled.
    Args:
        ds (DataStream): Windowed/grouped DataStream object
        accuracy_threashold (int): accuracy values above this are treated as missing
Returns:
DataStream object
"""
schema = ds._data.schema
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def gps_imputer(data):
data = data.sort_values('localtime').reset_index(drop=True)
        data.loc[data.accuracy > accuracy_threashold, 'latitude'] = np.nan
        data.loc[data.accuracy > accuracy_threashold, 'longitude'] = np.nan
data = data.fillna(method='ffill').dropna()
return data
# check if datastream object contains grouped type of DataFrame
if not isinstance(ds._data, GroupedData):
raise Exception(
"DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")
data = ds._data.apply(gps_imputer)
results = DataStream(data=data, metadata=Metadata())
metadta = update_metadata(stream_metadata=results.metadata,
stream_name="gps--org.md2k.imputed",
stream_desc="impute GPS data",
module_name="cerebralcortex.algorithms.gps.clustering.impute_gps_data",
module_version="1.0.0",
authors=[{"Azim": "<EMAIL>"}])
results.metadata = metadta
return results
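# Hedged sketch (added by the editor, not part of the original module): the core
# imputation rule of gps_imputer on a plain pandas frame, so the logic can be
# exercised without a Spark session. Column names mirror the UDF above.
def _impute_gps_pandas(pdf: pd.DataFrame, accuracy_threshold: int = 100) -> pd.DataFrame:
    pdf = pdf.sort_values('localtime').reset_index(drop=True)
    bad = pdf['accuracy'] > accuracy_threshold
    pdf.loc[bad, ['latitude', 'longitude']] = np.nan
    return pdf.fillna(method='ffill').dropna()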
def cluster_gps(ds: DataStream, epsilon_constant:int = 1000,
km_per_radian:int = 6371.0088,
geo_fence_distance:int = 30,
minimum_points_in_cluster:int = 1,
latitude_column_name:str = 'latitude',
longitude_column_name:str = 'longitude'):
"""
    Cluster GPS data. The clustering algorithm is DBSCAN with a haversine metric.
    Args:
        ds (DataStream): Windowed/grouped DataStream object
        epsilon_constant (int): metres-to-kilometres factor used when deriving DBSCAN's eps
        km_per_radian (int): mean Earth radius in kilometres
        geo_fence_distance (int): cluster radius (geo-fence) in metres
        minimum_points_in_cluster (int): DBSCAN min_samples
        latitude_column_name (str): name of the latitude column
        longitude_column_name (str): name of the longitude column
Returns:
DataStream object
"""
centroid_id_name = 'centroid_id'
features_list = [StructField('centroid_longitude', DoubleType()),
StructField('centroid_latitude', DoubleType()),
StructField('centroid_id', IntegerType()),
StructField('centroid_area', DoubleType())]
schema = StructType(ds._data._df.schema.fields + features_list)
column_names = [a.name for a in schema.fields]
def reproject(latitude, longitude):
from math import pi, cos, radians
earth_radius = 6371009 # in meters
lat_dist = pi * earth_radius / 180.0
y = [lat * lat_dist for lat in latitude]
x = [long * lat_dist * cos(radians(lat))
for lat, long in zip(latitude, longitude)]
return np.column_stack((x, y))
def get_centermost_point(cluster: np.ndarray) -> object:
"""
Get center most point of a cluster
Args:
            cluster (np.ndarray): array of [latitude, longitude] points
        Returns:
            list: [latitude, longitude, area] of the cluster's centermost point
        """
try:
if cluster.shape[0]>=3:
points_project = reproject(cluster[:,0],cluster[:,1])
hull = ConvexHull(points_project)
area = hull.area
else:
area = 1
except:
area = 1
centroid = (
MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
centermost_point = min(cluster, key=lambda point: great_circle(point,
centroid).m)
return list(centermost_point) + [area]
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
@CC_MProvAgg('gps--org.md2k.phonesensor--phone', 'gps_clustering', 'gps--org.md2k.clusters', ['user', 'timestamp'], ['user', 'timestamp'])
def gps_clustering(data):
if data.shape[0] < minimum_points_in_cluster:
return pd.DataFrame([], columns=column_names)
elif data.shape[0] < 2:
data['centroid_area'] = 1
data['centroid_id'] = 0
data['centroid_latitude'] = data[latitude_column_name].values[0]
data['centroid_longitude'] = data[longitude_column_name].values[0]
return data
coords = np.float64(data[[latitude_column_name, longitude_column_name]].values)
epsilon = geo_fence_distance / (
epsilon_constant * km_per_radian)
db = DBSCAN(eps=epsilon, min_samples= minimum_points_in_cluster,
algorithm='ball_tree', metric='haversine').fit(
np.radians(coords))
data[centroid_id_name] = db.labels_
cluster_labels = db.labels_
clusters = pd.Series(
[coords[cluster_labels == n] for n in np.unique(cluster_labels)])
cluster_names = np.array([n for n in np.unique(cluster_labels)])
centermost_points = clusters.map(get_centermost_point)
centermost_points = np.array(centermost_points)
all_dict = []
for i, col in enumerate(cluster_names):
cols = np.array(centermost_points[i])
all_dict.append([col, cols[0], cols[1], cols[2]])
temp_df = | pd.DataFrame(all_dict, columns=[centroid_id_name, 'centroid_latitude', 'centroid_longitude', 'centroid_area']) | pandas.DataFrame |
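# Hedged sketch appended by the editor (not part of the sample above): the
# haversine DBSCAN step used inside cluster_gps, on a bare numpy array of
# [latitude, longitude] degrees, outside of Spark. Defaults mirror the function.
import numpy as np
from sklearn.cluster import DBSCAN

def _cluster_coords(coords_deg, geo_fence_distance=30, minimum_points_in_cluster=1,
                    km_per_radian=6371.0088, epsilon_constant=1000):
    # eps in radians: metres -> kilometres -> radians on a sphere of radius km_per_radian
    eps = geo_fence_distance / (epsilon_constant * km_per_radian)
    db = DBSCAN(eps=eps, min_samples=minimum_points_in_cluster,
                algorithm='ball_tree', metric='haversine').fit(np.radians(coords_deg))
    return db.labels_   # -1 marks noise points, other labels are cluster ids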
# -*- coding: utf-8 -*-
"""
Functions for cleaning mdredze Sandy Twitter dataset.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf
from twitterinfrastructure.tools import cross_corr, output, query
def create_timeseries_diff(df, col1, col2, zone_col, write_path=None):
"""Creates a dataframe where col1 and col2 columns are replaced by
first differenced time series.
Parameters
----------
df : Dataframe
        Dataframe containing the time series data to difference (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
col1 : str
Name of column containing first time series.
col2 : str
Name of column containing second time series.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
Returns
-------
df_diff : dataframe
Notes
-----
"""
# create differenced time series dataframe
df_diff = pd.DataFrame(columns=[zone_col, 'timedelta',
col1, col2])
df_diff.set_index([zone_col, 'timedelta'], inplace=True)
zones = pd.unique(df.index.get_level_values(level=zone_col))
for zone in zones:
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = pd.to_timedelta(s_y1.index.values, unit='h')
s_y2.index = pd.to_timedelta(s_y2.index.values, unit='h')
# difference both timeseries
s_y1_diff = pd.Series(data=np.diff(s_y1), index=s_y1.index.values[0:-1],
name=col1)
s_y2_diff = pd.Series(data=np.diff(s_y2), index=s_y2.index.values[0:-1],
name=col2)
df_zone = pd.concat([s_y1_diff, s_y2_diff], axis=1)
df_zone.index.name = 'timedelta'
df_zone = df_zone.reset_index()
df_zone[zone_col] = zone
df_zone = df_zone.set_index([zone_col, 'timedelta'])
# add zone to differenced dataframe
        df_diff = df_diff.append(df_zone, ignore_index=False, sort=False)
# save to csv
if write_path:
df_csv = df_diff.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
return df_diff
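# Hedged usage sketch (added by the editor): a minimal frame in the shape
# create_timeseries_diff expects - multi-indexed by zone and hourly timedelta.
# The column names and values are fabricated.
def _demo_create_timeseries_diff():
    idx = pd.MultiIndex.from_product(
        [['zone1'], pd.to_timedelta(range(5), unit='h')],
        names=['zone_id', 'timedelta'])
    df = pd.DataFrame({'tweet-count': [1., 3., 2., 5., 4.],
                       'zpace-drop': [0.1, 0.4, 0.2, 0.6, 0.5]}, index=idx)
    return create_timeseries_diff(df, 'tweet-count', 'zpace-drop', 'zone_id')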
def create_timeseries_shift(df, df_max_rho, col1, col2, zone_col,
write_path=None):
"""Creates a dataframe where the 2nd time series column is time-shifted.
Parameters
----------
df : Dataframe
Dataframe to containing time series data to shift (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
df_max_rho : Dataframe
Dataframe containing desired shifts for col2 in a 'max-lag' column,
indexed by zone_col.
col1 : str
Name of column containing first time series (copied).
col2 : str
Name of column containing second time series. This is the shifted
time series, where col2_shifted = col2 + shift.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
Returns
-------
df_shift : dataframe
Notes
-----
"""
# create shifted time series dataframe
df_shift = pd.DataFrame(columns=[zone_col, 'timedelta', col1, col2])
df_shift.set_index([zone_col, 'timedelta'], inplace=True)
for zone in df_max_rho.index.values:
if not np.isnan(df_max_rho.loc[zone, 'max-rho']):
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = pd.to_timedelta(s_y1.index.values, unit='h')
s_y2.index = pd.to_timedelta(s_y2.index.values, unit='h')
# shift 2nd time series
shift = df_max_rho.loc[zone, 'max-shift']
s_y2_shift = s_y2.shift(1, freq=pd.Timedelta(shift, unit='h'))
df_zone = pd.concat([s_y1, s_y2_shift], axis=1)
df_zone.index.name = 'timedelta'
df_zone = df_zone.reset_index()
df_zone[zone_col] = zone
df_zone = df_zone.set_index([zone_col, 'timedelta'])
# add zone to shifted dataframe
            df_shift = df_shift.append(df_zone, ignore_index=False,
                                       sort=False)
# save to csv
if write_path:
df_csv = df_shift.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
return df_shift
def create_timeseries(df, zone_col, min_count, write_path=None, verbose=0):
"""Creates a time series dataframe where each column of df is
independently linearly interpolated over the total range of timedeltas of
each zone. Only time series with at least min_count data points are
included. Assumes the dataframe is indexed by a zone column (zone_col)
and a timedelta column (e.g. using index_timedelta).
Parameters
----------
df : Dataframe
Dataframe to calculate time series from.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
min_count : int
Minimum number of data points needed to convert to a time series.
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_ts : dataframe
Notes
-----
"""
# loop through zones
df_ts = pd.DataFrame()
skipped = []
zones = pd.unique(df.index.get_level_values(zone_col))
for zone in zones:
df_zone = df.xs(zone, level=0)
# loop through columns (i.e. data to convert to time series)
y_interps = []
cols = df_zone.columns.values
for col in cols:
s = df_zone[col].dropna()
if s.count() < min_count:
skipped.append((zone, col))
else:
timedeltas = range(s.index.astype('timedelta64[h]').min(),
s.index.astype('timedelta64[h]').max() + 1)
y_interp = pd.Series(data=np.interp(
timedeltas, s.index.astype('timedelta64[h]'), s.values),
index=timedeltas, name=col)
y_interps.append(y_interp)
# add interpolated data to dataframe
if y_interps:
df_temp = pd.concat(objs=y_interps, axis=1, join='outer')
df_temp = df_temp.set_index(
pd.to_timedelta(df_temp.index.values, unit='h'))
df_temp[zone_col] = zone
df_temp.set_index(zone_col, append=True, inplace=True)
df_temp.index.names = ['timedelta', zone_col]
df_temp = df_temp.reorder_levels([1, 0])
df_ts = df_ts.append(df_temp, sort=False)
# save to csv
if write_path:
df_csv = df_ts.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
if verbose >= 1:
output('skipped zones for having less than {min_count} data points '
'in original column data: {skipped}'.format(skipped=skipped,
min_count=min_count))
return df_ts
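# Hedged usage sketch (added by the editor, assuming the pandas version this
# module targets): sparse hourly observations for one zone, linearly
# interpolated onto an hourly grid by create_timeseries. Values are fabricated.
def _demo_create_timeseries():
    idx = pd.MultiIndex.from_arrays(
        [['zone1'] * 3, pd.to_timedelta([0, 2, 5], unit='h')],
        names=['zone_id', 'timedelta'])
    df = pd.DataFrame({'tweet-count': [1.0, 3.0, 9.0]}, index=idx)
    return create_timeseries(df, zone_col='zone_id', min_count=3, verbose=1)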
def index_timedelta(df, datetime_ref, datetime_col):
"""Indexes a dataframe on a timedelta calculated from datetime_col
relative to datetime_ref.
Parameters
----------
df : Dataframe
Dataframe to reindex on timedelta.
datetime_ref : Timestamp
Reference datetime to calculate timedelta relative to, specified as a
timezone-aware Pandas Timestamp object. Calculates timedelta as
datetime_col - datetime_ref.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
datetime_col : str
Name of column (or index) containing the datetime data to calculate
timedelta from.
Returns
-------
df : dataframe
Notes
-----
"""
indexes = df.index.names
df = df.reset_index()
# calculate and add timedelta
    df['timedelta'] = df[datetime_col] - datetime_ref
# df['timedelta'] = [int(td.total_seconds() / 3600) for td
# in df['timedelta']]
# df['timedelta'] = pd.to_timedelta(df['timedelta'], unit='h')
# drop columns and reindex with datetime_col replaced by timedelta
df = df.drop([datetime_col], axis=1)
indexes = ['timedelta' if ind == datetime_col else ind for ind in indexes]
df = df.set_index(indexes)
df = df.sort_index(level=0)
return df
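# Hedged usage sketch (added by the editor): re-indexing a datetime-indexed
# frame onto a timedelta axis measured from a reference time. Values fabricated.
def _demo_index_timedelta():
    ref = pd.Timestamp('2012-10-29 00:00:00', tz='America/New_York')
    times = pd.date_range('2012-10-29 00:00', periods=3, freq='H',
                          tz='America/New_York')
    df = pd.DataFrame({'nyiso_zone': ['A'] * 3,
                       'datetimeNY': times,
                       'err0': [0.01, 0.05, 0.02]})
    df = df.set_index(['nyiso_zone', 'datetimeNY'])
    return index_timedelta(df, datetime_ref=ref, datetime_col='datetimeNY')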
def load_nyctlc_zone(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone. Assumes the database
contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
        Start date to include data from (inclusive), specified as a
        timezone-aware Pandas Timestamp object.
        E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
        tz='America/New_York')
    enddate : Timestamp
        End date to include data from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
df_taxi = load_nyctlc_zone_hour(startdate, enddate, trip_type,
trip_count_filter, db_path, verbose=verbose)
# remove index, remove columns, and group by zone
df_taxi = df_taxi.reset_index()
df_taxi = df_taxi.drop(['datetimeNY'], axis=1)
df_taxi = df_taxi.groupby(['location_id']).mean()
if verbose >= 1:
if trip_type == 'dropoff':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-drop'])) + ', ' +
str(np.nanmax(df_taxi['zpace-drop'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-drop'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-drop'])) + '].')
elif trip_type == 'pickup':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-pick'])) + ', ' +
str(np.nanmax(df_taxi['zpace-pick'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-pick'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-pick'])) + '].')
return df_taxi
def load_nyctlc_zone_date(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone and date. Assumes the database
contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
        Start date to include data from (inclusive), specified as a
        timezone-aware Pandas Timestamp object.
        E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
        tz='America/New_York')
    enddate : Timestamp
        End date to include data from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
df_taxi = load_nyctlc_zone_hour(startdate, enddate, trip_type,
trip_count_filter, db_path, verbose=verbose)
# remove index, adjust datetime to date, and group by zone and date
df_taxi = df_taxi.reset_index()
df_taxi['datetimeNY'] = pd.to_datetime(df_taxi['datetimeNY']).dt.date
df_taxi = df_taxi.groupby(['location_id', 'datetimeNY']).mean()
if verbose >= 1:
if trip_type == 'dropoff':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-drop'])) + ', ' +
str(np.nanmax(df_taxi['zpace-drop'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-drop'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-drop'])) + '].')
elif trip_type == 'pickup':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-pick'])) + ', ' +
str(np.nanmax(df_taxi['zpace-pick'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-pick'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-pick'])) + '].')
return df_taxi
def load_nyctlc_zone_hour(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone and hour. Assumes the
database contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
        Start date to include data from (inclusive), specified as a
        timezone-aware Pandas Timestamp object.
        E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
        tz='America/New_York')
    enddate : Timestamp
        End date to include data from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
if verbose >= 1:
output('Started query.')
# define trip type
if trip_type not in ['dropoff', 'pickup']:
raise ValueError('Invalid trip_type argument: {arg}.'.format(
arg=trip_type))
# convert datetimes
enddate_exclusive = enddate - pd.Timedelta('1 second')
startdate_sql = startdate.strftime("%Y-%m-%d %H:%M:%S")
enddate_sql = enddate_exclusive.strftime("%Y-%m-%d %H:%M:%S")
# load dropoff/pickup data
sql = """
SELECT {trip_type}_datetime AS datetimeNY,
{trip_type}_location_id AS location_id,
z_mean_pace AS zpace, z_trip_count AS ztrips
FROM standard_zone{trip_type}_hour_sandy
WHERE
trip_count > {trip_count_filter} AND
{trip_type}_datetime BETWEEN
"{startdate_sql}" AND "{enddate_sql}"
;""".format(trip_count_filter=trip_count_filter,
startdate_sql=startdate_sql, enddate_sql=enddate_sql,
trip_type=trip_type)
df_taxi = query(db_path, sql)
# add columns
df_taxi['abs-zpace'] = abs(df_taxi['zpace'])
df_taxi['abs-ztrips'] = abs(df_taxi['ztrips'])
# convert datetimes
df_taxi['datetimeNY'] = pd.to_datetime(df_taxi['datetimeNY'])
df_taxi['datetimeNY'] = [dt.tz_localize(tz='America/New_York') for dt in
df_taxi['datetimeNY']]
# index and sort
df_taxi = df_taxi.set_index(['location_id', 'datetimeNY'])
df_taxi = df_taxi.sort_index(level=0)
if verbose >= 1:
output('[min, max] taxi datetimeNY (hourly): [' +
str(min(df_taxi.index.get_level_values(level=1))) + ', ' +
str(max(df_taxi.index.get_level_values(level=1))) + '].')
output('[min, max] taxi pace and trips mean z-score (hourly): [' +
str(np.nanmin(df_taxi['zpace'])) + ', ' +
str(np.nanmax(df_taxi['zpace'])) + '], [' +
str(np.nanmin(df_taxi['ztrips'])) + ', ' +
str(np.nanmax(df_taxi['ztrips'])) + '].')
# add drop or pick to column names
if trip_type == 'dropoff':
val = '-drop'
elif trip_type == 'pickup':
val = '-pick'
else:
pass
col_dict = {}
for col in df_taxi.columns.values:
col_dict[col] = col + val
df_taxi = df_taxi.rename(col_dict, axis='columns')
return df_taxi
def load_nyiso(startdate, enddate, db_path, verbose=0):
"""Query and clean nyiso load forecast error data for the specified date
range from a sqlite database. Assumes the database contains a
forecast_error table created using create_forecast_err.
Parameters
----------
startdate : Timestamp
        Start date to include data from (inclusive), specified as a
        timezone-aware Pandas Timestamp object.
        E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
        tz='America/New_York')
    enddate : Timestamp
        End date to include data from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, forecast_error
datetimes are UTC.
"""
if verbose >= 1:
output('Started query.')
# convert datetimes
startdateUTC = startdate.tz_convert('UTC')
enddateUTC = enddate.tz_convert('UTC') - pd.Timedelta('1 second')
startdate_sql = startdateUTC.strftime("%Y-%m-%d %H:%M:%S")
enddate_sql = enddateUTC.strftime("%Y-%m-%d %H:%M:%S")
# load nyiso load data
sql = """
SELECT datetimeUTC, zone_id AS nyiso_zone,
forecast_error_p0 AS err0
FROM forecast_error
WHERE
datetimeUTC BETWEEN "{startdate_sql}" AND "{enddate_sql}"
;""".format(startdate_sql=startdate_sql, enddate_sql=enddate_sql)
df = query(db_path, sql)
# convert datetimes
df['datetimeUTC'] = pd.to_datetime(df['datetimeUTC'])
df['datetimeUTC'] = [datetime.tz_localize(tz='UTC') for datetime in
df['datetimeUTC']]
df['datetimeNY'] = [datetime.tz_convert('America/New_York') for
datetime in df['datetimeUTC']]
# add and drop columns
df['percent-err0'] = df['err0'] * 100
df = df.drop(['datetimeUTC'], axis=1)
# index and sort
df = df.set_index(['nyiso_zone', 'datetimeNY'])
df = df.sort_index(level=0)
if verbose >= 1:
output('[min, max] forecast error datetimeNY: [' +
str(min(df.index.get_level_values(level=1))) + ', ' +
str(max(df.index.get_level_values(level=1))) + '].')
output('[min, max] forecast error: [' +
str(np.nanmin(df['err0'])) + ', ' +
str(np.nanmax(df['err0'])) + '].')
output('Finished query.')
return df
def max_cross_corr(df, col1, col2, zone_col, shifts, min_overlap, verbose=0):
"""Creates a dataframe containing the time shift that maximizes
cross-correlation between two time series, the max cross-correlation value,
and the number of overlapping data points in those series.
Parameters
----------
df : Dataframe
Dataframe to containing time series data (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
col1 : str
Name of column containing first time series.
col2 : str
Name of column containing second time series. This is the shifted
time series, where col2_shifted = col2 + shift.
zone_col : str
Name of spatial zone index.
shifts : list
List of time shifts to apply to 2nd time series (in hours).
min_overlap : int
Minimum number of overlapping data points (after the 2nd series is time
shifted) needed to calculate cross-correlation.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_max_rho : dataframe
Dataframe of max cross-correlations and associated shifts and counts.
df_rho : dataframe
Dataframe of cross-correlations and associated shifts and counts for
all shifts.
Notes
-----
"""
df_rho = pd.DataFrame(columns=['shift', zone_col, 'rho'])
df_count = pd.DataFrame(columns=['shift', zone_col, 'count'])
skipped = []
zones = pd.unique(df.index.get_level_values(zone_col))
for shift in shifts:
for zone in zones:
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = | pd.to_timedelta(s_y1.index.values, unit='h') | pandas.to_timedelta |
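# Hedged sketch appended by the editor (not part of the sample above): the
# shift-and-correlate idea behind max_cross_corr, written with plain pandas
# because the project's own cross_corr helper is not shown in this excerpt.
import numpy as np
import pandas as pd

def _best_shift(y1, y2, shifts, min_overlap=3):
    """Return the (shift, rho) pair that maximises the Pearson correlation of
    y1[t] against y2 shifted by `shift` samples."""
    best_shift, best_rho = None, -np.inf
    s1, s2 = pd.Series(y1), pd.Series(y2)
    for shift in shifts:
        pair = pd.concat([s1, s2.shift(shift)], axis=1).dropna()
        if len(pair) < min_overlap:
            continue
        rho = pair.iloc[:, 0].corr(pair.iloc[:, 1])
        if rho > best_rho:
            best_shift, best_rho = shift, rho
    return best_shift, best_rho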
import shutil
import random
import tempfile
import pandas as pd
from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.exchange_bcolz import BcolzExchangeBarWriter, \
BcolzExchangeBarReader
from catalyst.exchange.bundle_utils import get_df_from_arrays
from nose.tools import assert_equals
class TestBcolzWriter(object):
@classmethod
def setup_class(cls):
cls.columns = ['open', 'high', 'low', 'close', 'volume']
def setUp(self):
self.root_dir = tempfile.mkdtemp() # Create a temporary directory
def tearDown(self):
shutil.rmtree(self.root_dir) # Remove the directory after the test
def generate_df(self, exchange_name, freq, start, end):
bundle = ExchangeBundle(exchange_name)
index = bundle.get_calendar_periods_range(start, end, freq)
df = pd.DataFrame(index=index, columns=self.columns)
df.fillna(random.random(), inplace=True)
return df
def test_bcolz_write_daily_past(self):
start = pd.to_datetime('2016-01-01')
end = pd.to_datetime('2016-12-31')
freq = 'daily'
df = self.generate_df('bitfinex', freq, start, end)
writer = BcolzExchangeBarWriter(
rootdir=self.root_dir,
start_session=start,
end_session=end,
data_frequency=freq,
write_metadata=True)
data = []
data.append((1, df))
writer.write(data)
pass
def test_bcolz_write_daily_present(self):
start = pd.to_datetime('2017-01-01')
end = pd.to_datetime('today')
freq = 'daily'
df = self.generate_df('bitfinex', freq, start, end)
writer = BcolzExchangeBarWriter(
rootdir=self.root_dir,
start_session=start,
end_session=end,
data_frequency=freq,
write_metadata=True)
data = []
data.append((1, df))
writer.write(data)
pass
def test_bcolz_write_minute_past(self):
start = | pd.to_datetime('2015-04-01 00:00') | pandas.to_datetime |
import pandas
from dmscripts.models.writecsv import csv_path
from dmscripts.models.modeltrawler import ModelTrawler
def base_model(base_model, keys, get_data_kwargs, client, logger=None, limit=None):
"""Fetch all the data for a given Digital Marketplace model from the api.
:param base_model: A Digital Marketplace model (client must have a 'find_{model}_iter' method)
:param keys: The attributes we require of that model.
:param get_data_kwargs: Additional kwargs for the get request the client will make.
:param client: Instantiated Digital Marketplace APIClient
:param logger:
:param limit: Maximum number of requests for the client to perform.
:return: A pandas DataFrame of the requested data. Columns as model attributes, rows as instances.
"""
mt = ModelTrawler(base_model, client)
data = list(mt.get_data(keys=keys, limit=limit, **get_data_kwargs))
if logger:
logger.info(
'{} {} returned after {}s'.format(len(data), base_model, mt.get_time_running())
)
return pandas.DataFrame(data) if data else | pandas.DataFrame(columns=keys) | pandas.DataFrame |
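# Hedged usage sketch appended by the editor (not part of the module above):
# `client` stands for an already-instantiated Digital Marketplace API client;
# the model name and key names are illustrative, not taken from the original code.
def _demo_base_model(client):
    return base_model(
        base_model='briefs',            # any model exposing a find_{model}_iter method
        keys=('id', 'status'),
        get_data_kwargs={},
        client=client,
        limit=10,
    )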
import fcntl
import sys
import time
import numpy as np
import pandas as pd
from keras.layers import Dense, BatchNormalization, Dropout
from keras.models import Sequential
from keras.optimizers import SGD
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold
# random search
#
# grid search
#
opt = str(sys.argv[1])
seed = 7
np.random.seed(seed)
start_time = time.time()
print('Start running..................')
datadir= './data/'
predictions = "./predictions/"
logs = './logs/'
training_data = pd.read_csv(datadir + 'numerai_training_data.csv', header=0)
tournament_data = pd.read_csv(datadir + 'numerai_tournament_data.csv', header=0)
validation_data = tournament_data[tournament_data.data_type == 'validation']
complete_training_data = pd.concat([training_data, validation_data])
features = [f for f in list(complete_training_data) if "feature" in f]
X = complete_training_data[features]
Y = complete_training_data["target"]
def create_model(learn_rate=0.01, momentum=0,optimizer='adam',
activation='sigmoid', neurons=500, neurons2=100, neurons3=50, dropout=0.2):
model = Sequential()
model.add(Dense(neurons, input_shape=(50,), kernel_initializer='glorot_uniform', activation='relu', use_bias=False))
model.add(Dropout(dropout))
model.add(BatchNormalization())
model.add(Dense(neurons2, kernel_initializer='uniform', activation='relu'))
model.add(Dropout(dropout))
model.add(BatchNormalization())
model.add(Dense(neurons3, kernel_initializer='uniform', activation='relu'))
model.add(Dropout(dropout))
model.add(BatchNormalization())
model.add(Dense(1, kernel_initializer='glorot_normal', activation=activation))
if optimizer == 'SGD':
optimizer = SGD(lr=learn_rate, momentum=momentum)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['binary_crossentropy', 'accuracy'])
return model
model = KerasClassifier(build_fn=create_model, epochs=8, batch_size=128, verbose=0)
# parameters to tune
learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
batch_size = [128, 256]
epochs = [8, 17, 19, 40, 60]
optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
activation = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']
dropout = [0.01, 0.26, 0.37,0.40]
neurons = [7, 10, 14, 600, 800, 1000, 1200]
neurons2 = [300, 200, 100]
neurons3 = [50, 75]
param_grid = dict(learn_rate=learn_rate, momentum=momentum, batch_size=batch_size,
epochs=epochs, optimizer=optimizer, activation=activation,
neurons=neurons, neurons2=neurons2, neurons3=neurons3, dropout=dropout)
stratified_kf = StratifiedKFold(n_splits=10)
kfold_split = stratified_kf.split(X, Y, groups=complete_training_data.era)
model = RandomizedSearchCV(estimator=model, param_distributions=param_grid, cv=kfold_split, scoring='neg_log_loss',
n_jobs=1,
verbose=3)
model_result = model.fit(X.values, Y.values)
print("Best: %f using %s" % (model_result.best_score_, model_result.best_params_))
means = model_result.cv_results_['mean_test_score']
stds = model_result.cv_results_['std_test_score']
params = model_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
model.best_estimator_.model.save('./my_model_2017-11-07_IV.h5')
model.best_estimator_.model.summary()
def check_consistency(model, valid_data):
eras = valid_data.era.unique()
count = 0
count_consistent = 0
for era in eras:
count += 1
        current_valid_data = valid_data[valid_data.era == era]
        features = [f for f in list(current_valid_data) if "feature" in f]
X_valid = current_valid_data[features]
Y_valid = current_valid_data["target"]
loss = model.evaluate(X_valid.values, Y_valid.values, batch_size=128, verbose=0)[0]
if (loss < 0.693):
consistent = True
count_consistent += 1
else:
consistent = False
print("{}: loss - {} consistent: {}".format(era, loss, consistent))
consistency = count_consistent / count
print("Consistency: {}".format(consistency))
return consistency
consistency = check_consistency(model.best_estimator_.model, validation_data)
if consistency > 0.58:
x_prediction = tournament_data[features]
t_id = tournament_data["id"]
y_prediction = model.best_estimator_.model.predict_proba(x_prediction.values, batch_size=128)
ll = model_result.best_score_
results = np.reshape(y_prediction, -1)
results_df = | pd.DataFrame(data={'probability': results}) | pandas.DataFrame |
import bedrock.viz
import bedrock.common
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import RFE
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from tpot import TPOTClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFwe, f_classif
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from tpot.builtins import OneHotEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold, RepeatedKFold
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
def tpot_search(X_train, X_test, y_train, y_test, target_column):
pipeline_optimizer = TPOTClassifier(
generations=30,
population_size=30,
cv=5,
random_state=42,
verbosity=2,
config_dict='TPOT sparse'
)
pipeline_optimizer.fit(X_train, y_train)
# print(pipeline_optimizer.score(X_test, y_test))
pipeline_optimizer.export('output/tpot_exported_pipeline_' + target_column + '.py')
# from sklearn.metrics import classification_report
# target_names = ['class 0', 'class 1', 'class 2']
# print(classification_report(y_true, y_pred, target_names=target_names))
def grid_search(x, y):
bernoulli = BernoulliNB()
parameters = {
'alpha': np.arange(0.01, 2, 0.1),
'binarize': np.arange(0, 1, 0.1),
'fit_prior': [True, False]
}
grid = GridSearchCV(bernoulli, parameters)
grid.fit(x, y)
def cross_bnb(x, y):
clf = BernoulliNB()
skf = StratifiedKFold(n_splits=10)
# skf.get_n_splits(x,y)
scores = cross_val_score(clf, x, y, cv=skf)
print(scores)
n, bins, patches = plt.hist(scores, 5)
plt.show()
def bnb(X_train, X_test, y_train, y_test, persist_path=None, do_viz=False):
    # RFE feature selection in front of the Bernoulli Naive Bayes classifier
    # print('BernoulliNB')
pipeline = make_pipeline(
RFE(
estimator=ExtraTreesClassifier(
criterion="gini",
max_features=0.1,
n_estimators=100
),
step=0.4
),
BernoulliNB(alpha=0.01, fit_prior=False)
)
pipeline.fit(X_train, y_train)
predicted = pipeline.predict(X_test)
if do_viz:
bedrock.viz.show_stats(y_test, predicted)
from sklearn.metrics import accuracy_score
print('Accuracy', accuracy_score(y_test, predicted))
bedrock.common.save_pickle(pipeline, persist_path)
# returns train and test classified
return pipeline
tpot_pipelines = {
'ICB': make_pipeline(
TfidfVectorizer(),
KNeighborsClassifier(n_neighbors=11, p=1, weights="distance")), # LinearSVC(C=10.0, dual=True, loss="hinge", penalty="l2", tol=0.001),
'fracture': make_pipeline(
TfidfVectorizer(),
LinearSVC(C=10.0, dual=False, loss="squared_hinge", penalty="l1", tol=0.01)),
'hydrocephalus': make_pipeline(
TfidfVectorizer(),
SelectFwe(score_func=f_classif, alpha=0.006),
RandomForestClassifier(bootstrap=False, criterion="gini", max_features=0.3, min_samples_leaf=2, min_samples_split=17, n_estimators=100, )
),
'midline': make_pipeline(
TfidfVectorizer(),
RandomForestClassifier(bootstrap=False, criterion="entropy", max_features=0.25, min_samples_leaf=2, min_samples_split=20, n_estimators=100)),
'vessels': make_pipeline(
TfidfVectorizer(),
RFE(estimator=ExtraTreesClassifier(criterion="entropy", max_features=0.15000000000000002, n_estimators=100), step=0.6500000000000001),
#OneHotEncoder(minimum_fraction=0.25),
LinearSVC(C=0.5, dual=True, loss="hinge", penalty="l2", tol=0.001)
)
}
def tpot_models(X_train, X_test, y_train, y_test, target_column, persist_path=None, do_viz=False):
pipeline = tpot_pipelines[target_column]
pipeline.fit(X_train, y_train)
y_hat = pipeline.predict(X_test)
if do_viz:
bedrock.viz.show_stats(y_test, y_hat)
bedrock.common.save_pickle(pipeline, persist_path)
def get_model_map():
return {
'BNB': make_pipeline(
TfidfVectorizer(),
MultinomialNB()
),
'SVM': make_pipeline(
TfidfVectorizer(),
LinearSVC(),
),
'RFC': make_pipeline(
TfidfVectorizer(),
RandomForestClassifier()
),
'EXT': make_pipeline(
TfidfVectorizer(),
ExtraTreesClassifier()
)
}
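# Hedged usage sketch (added by the editor): fitting one pipeline from
# get_model_map on a tiny fabricated text-classification problem.
def _demo_model_map():
    texts = ["no acute fracture", "acute fracture of the left rib",
             "no fracture seen", "displaced fracture present"]
    labels = [0, 1, 0, 1]
    pipeline = get_model_map()['SVM']          # TfidfVectorizer + LinearSVC
    pipeline.fit(texts, labels)
    return pipeline.predict(["possible fracture", "no abnormality seen"])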
import pandas as pd
class DataFrameScorer:
def __init__(self, groups):
# groups is a df column on which you want to group by
self.groups = groups
def __call__(self, estimator, X, y):
df = pd.DataFrame({'X': X, 'y': y, 'group': self.groups})
df['predicted'] = estimator.predict(X)
f = {'predicted': ['max'], 'y': ['max']}
df_agg = df.groupby(['group']).agg(f)
correct = df_agg['predicted', 'max'] == df_agg['y', 'max']
return sum(correct)/len(correct)
def group_accuracy(estimator, X, y, groups):
df = pd.DataFrame({'X': X, 'y': y, 'group': groups})
df['predicted'] = estimator.predict(X)
f = {'predicted': ['max'], 'y': ['max']}
df_agg = df.groupby(['group']).agg(f)
correct = df_agg['predicted', 'max'] == df_agg['y', 'max']
return sum(correct)/len(correct)
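# Hedged sketch (added by the editor): what the group-wise scorers above compute,
# shown directly on predictions. A group counts as correct when the max predicted
# label over the group equals the max true label. All values are fabricated.
def _demo_group_accuracy():
    df = pd.DataFrame({'group': ['r1', 'r1', 'r2', 'r2'],
                       'y': [0, 1, 0, 0],
                       'predicted': [1, 0, 0, 0]})
    agg = df.groupby('group').agg({'predicted': 'max', 'y': 'max'})
    return (agg['predicted'] == agg['y']).mean()   # -> 1.0 for this toy frame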
def group_accuracy_2(y_df, y_hat, groups_df):
df = | pd.DataFrame({'y': y_df, 'y_hat': y_hat, 'group': groups_df[y_df.index]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
def min2day_v2(df,lag_ps):
    intraday = df
    # preparation: rolling range over the last lag_ps bars
    intraday['range1'] = intraday['high'].rolling(lag_ps).max() - intraday['close'].rolling(lag_ps).min()
    intraday['range2'] = intraday['close'].rolling(lag_ps).max() - intraday['low'].rolling(lag_ps).min()
    intraday['range'] = np.where(intraday['range1'] > intraday['range2'], intraday['range1'], intraday['range2'])
    return intraday
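# Hedged usage sketch (added by the editor): min2day_v2 on a few bars of
# fabricated OHLC data with a 3-bar lookback; the first lag_ps-1 rows are NaN.
def _demo_min2day_v2():
    bars = pd.DataFrame({'high':  [10.5, 10.8, 10.6, 11.0],
                         'low':   [10.0, 10.2, 10.1, 10.4],
                         'close': [10.2, 10.6, 10.3, 10.9]})
    return min2day_v2(bars, lag_ps=3)   # adds range1 / range2 / range columns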
#signal generation
#even replace assignment with pandas.at
#it still takes a while for us to get the result
#any optimization suggestion besides using numpy array?
def signal_generation(df, intraday, param, column, lag_ps, stop_pr, is_prophetic):
    # as the lag has been set to 5,
    # we should start our backtesting after 4 workdays of the current month
    # (changed here to count from the first four minutes instead)
#cumsum is to control the holding of underlying asset
#sigup and siglo are the variables to store the upper/lower threshold
#upper and lower are for the purpose of tracking sigup and siglo
signals=df[df.index>=intraday['date0'].iloc[lag_ps-1]]
signals['signals']=0
signals['cumsum']=0
signals['upper']=0.0
signals['lower']=0.0
sigup=float(0)
siglo=float(0)
#for traversal on time series
#the tricky part is the slicing
#we have to either use [i:i] or pd.Series
#first we set up thresholds at the beginning of london market which is est 3am
#if the price exceeds either threshold
#we will take long/short positions
for i in signals.index:
#note that intraday and dataframe have different frequencies
#obviously different metrics for indexes
        # we use the variable date for index conversion
        # """ date: year-month-day hour-minute """
# date ='%s-%s-%s %s-%s-%s%s' %(i.year,i.month, i.day, i.hour, i.minute, i.second, i.second)
## date = '2015-1-5 9-20-00'
# market opening
if is_prophetic:
            # compute the market-open action signal from the daily return?
time = pd.to_datetime(i)
td1 = datetime.timedelta(hours=6)
td2 = datetime.timedelta(minutes=-31)#
time_shift = time + td1 + td2
time_shift = str(time_shift)
if (i.hour==9 and i.minute==31):
if signals['open'][i] > signals['close'][time_shift]:
signals.at[i,'signals']=-1
if signals['open'][i] <= signals['close'][time_shift]:
signals.at[i,'signals']=1
#set up thresholds
# if (i.hour==9 and i.minute==16):
# sigup=float(param*intraday['range'][date]+pd.Series(signals[column])[i])
# siglo=float(-(1-param)*intraday['range'][date]+pd.Series(signals[column])[i])
if (i.hour==9 and i.minute==31) or (i.hour==10 and i.minute==31) \
or (i.hour==13 and i.minute==1) or (i.hour==14 and i.minute==1):
sigup=float(param[0]*intraday['range'][i]+pd.Series(signals[column])[i])
siglo=float(-(1-param[1])*intraday['range'][i]+pd.Series(signals[column])[i])
#thresholds got breached
#signals generating
if (sigup!=0 and pd.Series(signals[column])[i]>sigup):
signals.at[i,'signals']=1
if (siglo!=0 and pd.Series(signals[column])[i]<siglo):
signals.at[i,'signals']=-1
#check if signal has been generated
#if so, use cumsum to verify that we only generate one signal for each situation
if | pd.Series(signals['signals']) | pandas.Series |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = | Series(1, index=['a', 'a', 'b', 'b', 'c']) | pandas.Series |
#!/usr/bin/env python
# encoding: utf-8
import os
import numpy as np
import scipy as sp
import matplotlib as mpl
mpl.use("TkAgg")
mpl.rcParams['pdf.fonttype'] = 42
import matplotlib.pylab as plt
import seaborn as sns
import pandas as pd
from IPython import embed as shell
from tqdm import tqdm
from sim_tools import get_OU_traces, apply_bounds_diff_trace, _bounds, _bounds_collapse_linear, _bounds_collapse_hyperbolic
from sim_tools import summary_plot, conditional_response_plot
sns.set(style='ticks', font='Arial', font_scale=1, rc={
'axes.linewidth': 0.25,
'axes.labelsize': 7,
'axes.titlesize': 7,
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'legend.fontsize': 6,
'xtick.major.width': 0.25,
'ytick.major.width': 0.25,
'text.color': 'Black',
'axes.labelcolor':'Black',
'xtick.color':'Black',
'ytick.color':'Black',} )
sns.plotting_context()
def do_simulations(params):
rt = []
response = []
stimulus = []
for stim in [1,0]:
# get traces:
x = get_OU_traces(v=params['v'],
ll=params['ll'],
dc=params['dc'],
z=params['z'],
pre_generated=False,
stim=stim,
nr_trials=params['nr_trials'],
tmax=tmax,
dt=dt,)
# get bounds:
if params['bound'] == 'default':
b1, b0 = _bounds(a=params['a'], lower_is_0=False, tmax=tmax, dt=dt)
elif params['bound'] == 'collapse_linear':
b1, b0 = _bounds_collapse_linear(a=params['a'], c1=params['c1'], c0=params['c0'], lower_is_0=False, tmax=tmax, dt=dt)
elif params['bound'] == 'collapse_hyperbolic':
b1, b0 = _bounds_collapse_hyperbolic(a=params['a'], c=params['c'], lower_is_0=False, tmax=tmax, dt=dt)
# apply bounds:
rt_dum, response_dum = apply_bounds_diff_trace(x=x, b1=b1, b0=b0)
# store results:
rt.append((rt_dum*dt)+ndt)
response.append(response_dum)
stimulus.append(np.ones(params['nr_trials']) * stim)
df = | pd.DataFrame() | pandas.DataFrame |
import glob
import os
from networkx.readwrite import json_graph
import json
import networkx as nx
import pandas as pd
from subs2network.utils import add_prefix_to_dict_keys
from subs2network.imdb_dataset import imdb_data
from subs2network.consts import MOVIE_YEAR
def get_node_features(g):
closeness = nx.closeness_centrality(g)
betweenness = nx.betweenness_centrality(g)
betweenness_weight = nx.betweenness_centrality(g, weight="weight")
degree_centrality = nx.degree_centrality(g)
pr = nx.pagerank(g, weight=None)
pr_weight = nx.pagerank(g, weight="weight")
clustering = nx.clustering(g)
for v in g.nodes():
res = {"total_weight": g.degree(v, weight="weight"), "degree": g.degree(v), "movie_name": g.graph["movie_name"],
"year": g.graph["movie_year"], "imdb_rating": g.graph["imdb_rating"], "closeness": closeness[v],
"betweenness_weight": betweenness_weight[v], "betweenness": betweenness[v],
"degree_centrality": degree_centrality[v], "clustering": clustering[v], "pagerank": pr[v],
"pr_weight": pr_weight[v], "gender": imdb_data.get_actor_gender(v), "name": v}
yield res
def get_actor_features(g, actor):
res = {}
closeness = nx.closeness_centrality(g)
betweenness = nx.betweenness_centrality(g)
betweenness_weight = nx.betweenness_centrality(g, weight="weight")
degree_centrality = nx.degree_centrality(g)
clustering = nx.clustering(g)
pr = nx.pagerank(g, weight=None)
pr_weight = nx.pagerank(g)
v = actor
res["total_weight"] = g.degree(v, weight="weight")
res["degree"] = g.degree(v)
res["closeness"] = closeness[v]
res["betweenness"] = betweenness[v]
res["betweenness_weight"] = betweenness_weight[v]
res["degree_centrality"] = degree_centrality[v]
res["clustering"] = clustering[v]
res["movie_rating"] = g.graph["imdb_rating"]
res["pagerank"] = pr[v]
res["pagerank_weight"] = pr_weight[v]
# res["gender"] = imdb_data.get_actor_gender(v)
return res
def average_graph_weight(g):
stats = pd.Series(list(nx.get_edge_attributes(g, "weight").values())).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "weight")
def average_graph_degree(g):
stats = pd.Series([d for n, d in nx.degree(g, g.nodes())]).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "degree")
def average_actor_appearance(g):
stats = pd.Series([g.degree(v, weight="weight") for v in g.nodes()]).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "appearance")
def average_closeness_centrality(g):
stats = pd.Series(list(nx.closeness_centrality(g).values())).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "closeness")
def average_eigenvector_centrality(g):
stats = pd.Series(list(nx.eigenvector_centrality(g).values())).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "eigenvector")
def average_betweenness_centrality(g):
stats = pd.Series(list(nx.betweenness_centrality(g).values())).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "betweenness")
def average_pagerank(g):
stats = pd.Series(list(nx.nx.pagerank(g, weight=None).values())).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "pagerank")
def average_weighted_pagerank(g):
stats = pd.Series(list(nx.nx.pagerank(g, weight="weight").values())).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "weighted_pagerank")
def average_weighted_betweenness_centrality(g):
stats = pd.Series(list(nx.betweenness_centrality(g).values())).describe()
del stats["count"]
return add_prefix_to_dict_keys(stats.to_dict(), "weighted_betweenness")
def average_clustering(g):
try:
return {"average_clustering": nx.average_clustering(g)}
except:
return {"average_clustering": 0}
def average_weighted_clustering(g):
try:
return {"average_weighted_clustering": nx.average_clustering(g, weight="weight")}
except:
return {"average_clustering": 0}
def graph_clique_number(g):
return {"clique_number": nx.graph_clique_number(g)}
def average_degree_connectivity(g):
return {"average_degree_connectivity": nx.average_degree_connectivity(g)}
def get_edge_number(g):
return {"edge_number": len(g.edges)}
def get_node_number(g):
return {"node_number": len(g.node)}
def analyze_movies():
p = "../temp/movies/"
res = []
for movie in tqdm(os.listdir(p)):
path = os.path.join(p, movie)
g_pth = os.path.join(path, f"json/{movie}.json")
if not os.path.exists(g_pth):
g_pth = glob.glob(os.path.join(path, f"json/*.json"))
if g_pth:
g_pth = g_pth[0]
if g_pth:
# try:
with open(g_pth) as f:
g = json_graph.node_link_graph(json.load(f))
if g.number_of_nodes() == 0:
continue
d = extract_graph_features(g)
#
# with open(os.path.join(path, f"{movie}.json")) as f:
# movie_info = json.load(f)
# d.update(json.loads(movie_info))
res.append(d)
# except:
# pass
# else:
# print(movie)
pd.DataFrame(res).to_csv(f"../temp//graph_features.csv", index=False)
def analyze_directors():
p = "../temp/directors/"
for director in os.listdir(p):
res = []
json_path = os.path.join(p, director, "json")
graphs = []
for g_pth in glob.glob(os.path.join(json_path, f"*roles*")):
if g_pth:
try:
with open(g_pth) as f:
g = json_graph.node_link_graph(json.load(f))
d = extract_graph_features(g)
d.update({"rating": g.graph["imdb_rating"], "year": g.graph["movie_year"],
"name": g.graph["movie_name"]})
graphs.append(g)
res.append(d)
except:
pass
if graphs:
joined_grpah = nx.compose_all(graphs)
d = extract_graph_features(joined_grpah)
d["name"] = "combined"
res.append(d)
pd.DataFrame(res).to_csv(f"../temp/output/{director}.csv", index=False)
def get_triangles(g):
all_cliques = nx.enumerate_all_cliques(g)
return [x for x in all_cliques if len(x) == 3]
def analyze_triangles():
p = "../temp/movies/"
res = []
json_path = os.path.join(p, "*", "json")
for g_pth in tqdm(glob.glob(os.path.join(json_path, f"*roles.json"))):
if g_pth:
with open(g_pth) as f:
g = json_graph.node_link_graph(json.load(f))
tr = get_triangles(g)
for t in tr:
t.append(g.graph["movie_name"].replace(" - roles", ""))
t.append(g.graph["movie_year"])
res += tr
| pd.DataFrame(res) | pandas.DataFrame |
# pylint: disable=too-many-lines
"""Field class."""
import os
import sys
from copy import deepcopy
import weakref
from functools import partial
from string import Template
import logging
import numpy as np
import pandas as pd
import h5py
import pyvista as pv
from anytree import PreOrderIter
from deprecated.sphinx import deprecated
from .base_spatial import SpatialComponent
from .grids import OrthogonalUniformGrid, CornerPointGrid, Grid
from .rock import Rock
from .states import States
from .aquifer import Aquifers
from .wells import Wells
from .tables import Tables
from .rates import calc_rates, calc_rates_multiprocess
from .decorators import state_check, cached_property
from .parse_utils import (tnav_ascii_parser, preprocess_path,
dates_to_str, read_dates_from_buffer)
from .plot_utils import lines_from_points
from .template_models import (ORTHOGONAL_GRID, CORNERPOINT_GRID,
DEFAULT_TN_MODEL, DEFAULT_ECL_MODEL)
from .utils import get_single_path, get_well_mask, get_spatial_well_control, get_spatial_cf_and_perf
from .configs import default_config
from .dump_ecl_utils import egrid, init, restart, summary
ACTOR = None
COMPONENTS_DICT = {'cornerpointgrid': ['grid', CornerPointGrid],
'orthogonaluniformgrid': ['grid', OrthogonalUniformGrid],
'grid': ['grid', Grid],
'rock': ['rock', Rock],
'states': ['states', States],
'wells': ['wells', Wells],
'tables': ['tables', Tables],
'aquifers': ['aquifers', Aquifers]
}
DEFAULT_HUNITS = {'METRIC': ['sm3/day', 'ksm3/day', 'ksm3', 'Msm3', 'bara'],
'FIELD': ['stb/day', 'Mscf/day', 'Mstb', 'MMscf', 'psia']}
#pylint: disable=protected-access
class FieldState:
"""State holder."""
def __init__(self, field):
self._field = weakref.ref(field)
@property
def field(self):
"""Reference Field."""
return self._field()
@property
def spatial(self):
"""Common state of spatial components."""
states = np.array([comp.state.spatial for comp in self.field._components.values()
if isinstance(comp, SpatialComponent)])
if 'wells' in self.field.components:
states = np.concatenate([states, [self.field.wells.state.spatial]])
if np.all(states):
return True
if np.all(~states):
return False
raise ValueError('Spatial components have different states.')
class Field:
"""Reservoir model.
Contains components of reservoir model and preprocessing actions.
Parameters
----------
path : str, optional
Path to source model files.
config : dict, optional
Components and attributes to load.
logfile : str, optional
Path to log file.
enoding : str, optional
Files encoding. Set 'auto' to infer encoding from initial file block.
Sometimes it might help to specify block size, e.g. 'auto:3000' will
read first 3000 bytes to infer encoding.
loglevel : str, optional
Log level to be printed while loading. Default to 'INFO'.
"""
def __init__(self, path=None, config=None, logfile=None, encoding='auto', loglevel='INFO'):
self._path = preprocess_path(path) if path is not None else None
self._encoding = encoding
self._components = {}
self._config = None
self._meta = {'UNITS': 'METRIC',
'DATES': pd.to_datetime([]),
'FLUIDS': [],
'HUNITS': DEFAULT_HUNITS['METRIC']}
self._state = FieldState(self)
logging.shutdown()
handlers = [logging.StreamHandler(sys.stdout)]
if logfile is not None:
handlers.append(logging.FileHandler(logfile, mode='w'))
logging.basicConfig(handlers=handlers)
self._logger = logging.getLogger('Field')
self._logger.setLevel(getattr(logging, loglevel))
if self._path is not None:
self._init_components(config)
self._pyvista_grid = None
self._pyvista_grid_params = {'use_only_active': True, 'cell_size': None, 'scaling': True}
def _init_components(self, config):
"""Initialize components."""
fmt = self._path.suffix.strip('.').upper()
if config is not None:
config = {k.lower(): self._config_parser(v) for k, v in config.items()}
if fmt == 'HDF5':
with h5py.File(self._path, 'r') as f:
keys = [k.lower() for k in f]
if config is None:
config = {k: {'attrs': None, 'kwargs': {}} for k in keys}
elif 'grid' in config:
if 'cornerpointgrid' in keys:
config['cornerpointgrid'] = config.pop('grid')
elif 'orthogonaluniformgrid' in keys:
config['orthogonaluniformgrid'] = config.pop('grid')
elif config is None:
self._logger.info('Using default config.')
config = {k.lower(): self._config_parser(v) for k, v in default_config.items()}
self._components = {COMPONENTS_DICT[k][0]: COMPONENTS_DICT[k][1]() for k in config}
self._config = {COMPONENTS_DICT[k][0]: v for k, v in config.items()}
@staticmethod
def _config_parser(value):
"""Separate config into attrs and kwargs."""
if isinstance(value, str):
attrs = [value.upper()]
kwargs = {}
elif isinstance(value, (list, tuple)):
attrs = [x.upper() for x in value]
kwargs = {}
elif isinstance(value, dict):
attrs = value['attrs']
if attrs is None:
pass
elif isinstance(attrs, str):
attrs = [attrs.upper()]
else:
attrs = [x.upper() for x in attrs]
kwargs = {k: v for k, v in value.items() if k != 'attrs'}
else:
raise TypeError("Component's config should be of type str, list, tuple or dict. Found {}."
.format(type(value)))
return {'attrs': attrs, 'kwargs': kwargs}
def assert_components_available(self, *comp_names):
# FIXME add to all the methods where required
"""Raises ValueError in case comp_names are not presented in self."""
for comp in comp_names:
if not hasattr(self, comp):
raise ValueError('Component %s is not loaded!' % comp)
@property
def meta(self):
""""Model meta data."""
return self._meta
@property
def state(self):
""""Field state."""
return self._state
@property
def start(self):
"""Model start time in a datetime format."""
return | pd.to_datetime(self.meta['START']) | pandas.to_datetime |
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[ | TS('2016-10-20') | pandas.Timestamp |
import pandas as pd
import numpy as np
import time
import sys
import json
from jsmin import jsmin
from collections import Counter
import os.path
from xlrd.biffh import XLRDError
from aenum import IntEnum
import time
# set up logging (to console)
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
PATH_TO_SETTINGS = 'settings/settings.json'
DEFAULT_SETTINGS = dict(
# path to panel structure excel file
path_panel='settings/panel.xlsx',
# name of sheets in panel structure excel file
sheet_persons='persons',
sheet_panel_entities='panel_entities',
sheet_companies='companies',
sheet_groups='groups',
sheet_questions='data dictionary',
sheet_scales='scales',
# path to survey data - {} will be replaced with year (YYYY)
path_results='survey-results/results_{}.xlsx',
# path to resulting output file
path_output='output/dnp_panel-data_{}-{}',
# years to include
years=[2013, 2014, 2015, 2016, 2017, 2018, 2019],
# exclude deprecated variables
exclude_deprecated=True,
# exclude variables with personal information
exclude_personal=True,
# exclude variables with auxiliary information
exclude_auxiliary=True,
# exclude variables from special sections
exclude_special_sections=True,
# exclude din meta data
exclude_din_meta_data=True,
# exclude variables with less x years in which they were part of the questionnaire
exclude_years_min=0,
# allow missing observations: include panel entities which did not complete all specified years
allow_missing=True,
# save the results to an excel file
save_output=True
)
def load_excel_sheet(file, sheet=None, exit_on_error=True):
"""
Load an Excel sheet and return as Pandas dataframe with user friendly error-handling.
:param file: Path to excel file
:param sheet: Name of sheet to load. Default: None -> Loads first sheet.
:param exit_on_error: Whether to call exit(0) on exception.
:returns: Pandas DataFrame, or None in case of error.
"""
df = None
try:
if sheet is not None:
df = pd.read_excel(file, sheet_name=sheet)
else:
df = pd.read_excel(file)
except FileNotFoundError:
logger.error("Could not find file '{}'.".format(file))
if exit_on_error:
exit(0)
except pd.errors.EmptyDataError:
logger.error("File '{}' is empty.".format(file))
if exit_on_error:
exit(0)
except pd.errors.ParserError:
logger.error("File '{}' is broken.".format(file))
if exit_on_error:
exit(0)
except XLRDError:
logger.error("Sheet '{}' is missing in {}.".format(sheet, file))
if exit_on_error:
exit(0)
except Exception:
logger.error("There was an error while loading '{}'".format(file))
if exit_on_error:
exit(0)
return df
class DataSetType:
SINGLE_YEAR = 1
PANEL = 2
class SelectionReason(IntEnum):
"""
Reasons why a participant was / was not selected for sample;
Reason > 0 : selected, Reason <= 0 : not selected
"""
NOT_ENOUGH_QUESTIONS_ANSWERED = -1
NOT_SELECTED_OTHER_REPRESENTATIVE_WAS_SELECTED = 0
SELECTED_AS_COMPANY_REPRESENTATIVE = 1
SELECTED_AS_COMPANY_REPRESENTATIVE_NO_OTHERS_FOUND = 2
SELECTED_AS_INDUSTRY_REPRESENTATIVE = 3
class DataSetCreator(object):
dataset_extra_cols = {
'selected', 'algorithmic_selection',
'algorithmic_selection_comment', 'historic_selection',
'panel_entity_id', 'person_id', 'year', 'cat_position', 'email'
}
required_settings = ['path_panel', 'sheet_persons', 'sheet_panel_entities',
'sheet_companies', 'sheet_groups', 'sheet_questions',
'sheet_scales', 'sheet_selection', 'path_results',
'path_output', 'years', 'exclude_deprecated',
'exclude_personal', 'exclude_auxiliary',
'exclude_din_meta_data', 'exclude_special_sections',
'exclude_years_min', 'allow_missing', 'save_output',
'use_selection_history']
def __init__(self, settings=DEFAULT_SETTINGS):
# settings: dataframe
self.settings = settings
self.years = self.settings['years']
self._validate_settings()
# list people (per year) that can't be found when creating dataset
self.missing_persons = {y: set() for y in self.years}
# load data according to settings...
# data: dictionary year:dataframe per self.years
# questions, q_dict, scales: data structure dataframes
# participant database: dataframes
# selection: historic (who was in the samples of the past?), dataframes
logger.info("Loading data...")
self.data, self.questions, self.q_dict, self.scales, self.persons, self.companies,\
self.panel_entities, self.groups,\
self.selection = DataSetCreator._load_data(
settings=self.settings
)
# create lookup for persons
self.persons_lookup = (self.persons
.drop_duplicates(
subset=["email", "wave_added"],
keep="last"
)
.set_index(
["email", "wave_added"]
)
[["id", "panel_entity_id"]]
.rename(
columns={
"id": "person_id"
})
.to_dict(
orient='index'
)
)
logger.info("Mapping scales...")
self.data = self._map_scales(
data=self.data,
scales=self.scales,
q_dict=self.q_dict
)
logger.info("Generating panel data...")
self.panel_df, self.available_questions_per_year = self._make_panel_df()
# warn if persons couldn't be identified
for y, missing in self.missing_persons.items():
if len(missing) > 0:
logger.warning("Missing in {}: {} persons".format(y, len(missing))) # ", ".join(missing)))
def _validate_settings(self):
for s in self.required_settings:
if s not in self.settings.keys():
logger.error("Setting '{}' is missing.".format(s))
exit(0)
if not os.path.isfile(self.settings["path_panel"]):
logger.error("Panel file '{}' could not be found.".format(self.settings["path_panel"]))
exit(0)
outdir = "/".join(self.settings["path_output"].split("/")[:-1])
if not os.path.isdir(outdir):
logger.error("Can't find output directory '{}'.".format(outdir))
exit(0)
def _set_sheet_settings_attribute(self, attr_name):
"""
Sets an object attribute (=attr_name) to a df generate from the sheet
of the same name in the panel file ("panel.xlsx").
:param attr_name: Object attribute = sheet name to set
:return: None
"""
setattr(self, attr_name, load_excel_sheet(
self.settings['path_panel'],
self.settings['sheet_{}'.format(attr_name)]
))
def _get_relevant_questions(self, dataset_type=DataSetType.SINGLE_YEAR):
"""
Get a set of question names that should be included in dataset
according to settings: in/exclude deprecated, personal, auxiliary questions,
meta data, questions that appear in a minimum number of years, special sections, etc.
:param dataset_type: default: DataSetType.SINGLE_YEAR. If DataSetType.PANEL, exclude
special section questions
:return: Set of question names
"""
query = 'name != "NaN"'
query += ' and not deprecated' if self.settings['exclude_deprecated'] else ""
query += ' and not personal_data' if self.settings['exclude_personal'] else ""
query += ' and not auxiliary' if self.settings['exclude_auxiliary'] else ""
query += ' and not name.str.startswith("din_")' if self.settings['exclude_din_meta_data'] else ""
query += ' and num_years_observed >= {}'.format(self.settings['exclude_years_min'])
if dataset_type==DataSetType.PANEL:
query += ' and special_section_year == "NaN"' if self.settings[
'exclude_special_sections'] else ""
return set(self.questions.query(query, engine='python')['name'].tolist())
@staticmethod
def _load_data(settings):
"""
Load survey data files and additional settings from panel.xlsx.
:param settings:
:return: DataFrames (data, questions, q_dict, scales, persons,
companies, groups, panel_entities, selection)
"""
# load survey data
data = {y: load_excel_sheet(
settings['path_results'].format(y))
for y in settings['years']
}
# load sheets from panel.xlsx
questions = load_excel_sheet(settings['path_panel'], settings['sheet_questions'])
scales = load_excel_sheet(settings['path_panel'], settings['sheet_scales'])
persons = load_excel_sheet(settings['path_panel'], settings['sheet_persons'])
companies = load_excel_sheet(settings['path_panel'], settings['sheet_companies'])
groups = load_excel_sheet(settings['path_panel'], settings['sheet_groups'])
panel_entities = load_excel_sheet(settings['path_panel'], settings['sheet_panel_entities'])
selection = load_excel_sheet(settings['path_panel'], settings['sheet_selection'])
# group scale entries
scales = scales.groupby('scale')
# make sure deprecated works...
questions['deprecated'] = questions['deprecated'].astype('bool')
# save dict representation of questions
try:
q_dict = questions.set_index('name').to_dict(orient='index')
except ValueError:
logger.error("questions are not unique")
counter = Counter(questions['name'].tolist())
logger.error(["{}({})".format(i, counter[i]) for i in counter if counter[i] > 1])
exit(0)
except Exception as ex:
logger.error("Can not generate question dictionary, questions not properly loaded or defined")
raise ex
return data, questions, q_dict, scales, persons,\
companies, groups, panel_entities, selection
def _was_selected(self, email, year):
selected = np.nan
selection_col = "selection_{}".format(year)
# try all indices
for y in self.years:
try:
sel = self.selection.set_index("email_{}".format(y))
selected = True if sel.loc[email][selection_col] == 1 else False
break
except KeyError:
pass
# logger.warning("could not find {} in {}".format(email, y))
return selected
def _select_by_history(self, df):
return df.apply(lambda x: self._was_selected(x['email'], x['year']), axis=1)
@staticmethod
def _get_q_map(q, q_dict, scales):
"""
Map observations to desired values, using the corresponding
question's scale definition.
:param q: question name
:param q_dict: question dictionary (q->{...,scale: scalename})
:param scales: Pandas DataFrame
:return: dict {original_data_value: mapped_data_value}
"""
q_scale = q_dict[q]['scale']
q_map = {}
# if question is not associated with any scale, no mapping is necessary
if q_scale in scales.groups.keys():
for r in scales.get_group(q_scale).iterrows():
r = r[1]
for n in range(1, 50):
key_name = 'alternative_{}'.format(n)
if key_name in r.keys():
original = r['alternative_{}'.format(n)]
if not pd.isnull(original):
# cast all values to str to avoid type mismatches
q_map[str(original)] = str(r['value'])
return q_map
def identify(self, email, year):
"""
Get id of associated panel entity and person based on email and year of observation.
:param email: person's e-mail address
:param year: year of entry in participant DB
:return: {panel_entity_id:int value, person_id:int value}
"""
res = None
for i in range(year, min(self.persons.wave_added)-1, -1):
try:
res = self.persons_lookup[(email, i)]
break
except KeyError:
pass
if res is None:
self.missing_persons[year].add(email)
return {
'panel_entity_id': None,
'person_id': None
}
else:
return res
@staticmethod
def _select_company_representative(group):
"""
Algorithm for selecting DNP participants:
When there are multiple participants who answer for the same company in
one wave, a set of rules has to determine the one participant whose
answers to take into account.
Prerequisites: All participants need to be matched to a company.
Company names have to be cleaned / coded, considering a
threshold up to which organization subdivisions are
regarded as separate units.
:param group: A Pandas group of participants, where each participant represents
the same company.
:return: Pandas Series with bool selected
"""
# selected = []
# order by fraction of answered questions (most first)
# ranked = group.sort_values(['fill'], ascending=False)
fill = group["total_fill"]
max_fill = max(fill)
# de-select all participants with too much missing data
# group.loc[fill < min_fill, "selected"] = SelectionReason.NOT_ENOUGH_QUESTIONS_ANSWERED
# CASE A: only one participant in group
if len(group) == 1:
group.loc[
:, "selected"
] = SelectionReason.SELECTED_AS_COMPANY_REPRESENTATIVE_NO_OTHERS_FOUND
# CASE B: multiple potential representatives
else:
# find best candidate = first cand. with max fill and standardization position
candidates = (
group[(fill == max_fill) & group.std_position].index
if group[fill == max_fill].std_position.any()
else group[fill == max_fill].index
)
# the representative
group.loc[
group.index == candidates[0],
"selected"
] = SelectionReason.SELECTED_AS_COMPANY_REPRESENTATIVE
# the rest is not selected
group.loc[
pd.isnull(group.selected),
"selected"
] = SelectionReason.NOT_SELECTED_OTHER_REPRESENTATIVE_WAS_SELECTED
return group.selected
def _select(self, df):
"""
Select one participant from all participant groups in the data.
Calls DataSetCreator._select on each group. Appends selection reasons.
:param df: Pandas DataFrame
:return:
"""
participants = None
try:
participants = df[["panel_entity_id", "person_id", "year", "total_fill", "cat_position", "view"]].copy()
except Exception as ex:
logger.error("Data is missing for {}:".format(df['year'].unique()[0]))
logger.error(ex)
logger.info("Make sure that the following variables exist and have completely mapped scales:")
logger.info("email, cat_position, num_empl, num_turnover, view")
exit(0)
participants["selected"] = np.nan
participants.loc[
participants.total_fill < self.settings["min_fill"],
"selected"
] = SelectionReason.NOT_ENOUGH_QUESTIONS_ANSWERED
participants["std_position"] = participants['cat_position'].str.contains('standard', case=False)
selection = []
if self.settings["include_privates_in_representative_selection"]:
# group all participants that answered enough questions
# by their panel_entity_id and the year of participantion
for name, group in participants.groupby(['panel_entity_id', 'year']):
group_selection = self._select_company_representative(group.copy())
selection.append(group_selection)
else:
# select all participants with view=private & enough answered questions
participants.loc[
(participants.view == "private") &
(participants.total_fill >= self.settings["min_fill"]),
"selected"
] = SelectionReason.SELECTED_AS_INDUSTRY_REPRESENTATIVE
selection.append(participants[participants.view == "private"].selected)
# group all view=company participants that answered enough questions
# by their panel_entity_id and the year of participantion
remaining = participants[
(participants.view != "private") &
(pd.isnull(participants.selected))
].copy()
for name, group in remaining.groupby(['panel_entity_id', 'year']):
group_selection = DataSetCreator._select_company_representative(group)
selection.append(group_selection)
selected = pd.concat(selection)
return (
selected > 0,
selected.replace({r.value: r.name.lower() for r in SelectionReason})
)
@staticmethod
def get_fill(df):
"""
Get percentage of questions filled in by participants as Pandas Series.
:param df: Pandas DataFrame
:return: Pandas Series
"""
col_delta = set(df.columns).intersection(DataSetCreator.dataset_extra_cols)
return df.count(axis=1).divide(len(df.columns) - len(col_delta))
def make_dataset(self, data, selected_years):
"""
Prepare a dataset from given data for the specified years:
- select a sample of participants based on DataSetCreator.select()
- include historical selection if available
- drop variables that are not available in time-span
:param data: Pandas DataFrame (stacked panel data in long format with var 'year')
:param selected_years: list<int> of years
:return: dataset (Pandas DataFrame)
"""
# keep only observations for selected years
df = data[data['year'].isin(selected_years)].dropna(axis=1, how='all').copy()
# drop questions that are not required
try:
relevant_questions = self._get_relevant_questions(
DataSetType.SINGLE_YEAR if len(selected_years) == 1
else DataSetType.PANEL
)
drop_cols = set(df.columns) - relevant_questions - DataSetCreator.dataset_extra_cols
df = df.drop(drop_cols, axis=1)
except Exception as ex:
logger.error("An error occured when trying to drop irrelevant questions:")
raise ex
exit(0)
# calculate % of questions filled in
df['total_fill'] = DataSetCreator.get_fill(df)
logger.info("{}: {} participants dropped due to too many missings".format(
", ".join([str(y) for y in selected_years]),
len(df.total_fill[df.total_fill < self.settings["min_fill"]].index)
))
# create "selection" column
selection, selection_reason = self._select(df)
df['algorithmic_selection'] = selection
df['algorithmic_selection_comment'] = selection_reason
# create "historic selection" column
df['historic_selection'] = self._select_by_history(df)
# create merged selection column (history if available, otherwise alg. selection)
# TODO: review
# df['selected'] = df['historic_selection'].fillna(df['algorithmic_selection'])
df['selected'] = df['algorithmic_selection']
logger.info("{}: selected {}/{} participants".format(
", ".join([str(y) for y in selected_years]),
len(df.selected[df.selected > 0].index),
len(df.selected)
))
# drop email address
if self.settings['exclude_personal']:
df = df.drop(['email'], axis=1)
return df
@staticmethod
def _map_scales(data, scales, q_dict):
"""
Replaces all data values for questions that have defined scales.
A default mapped value can be set by setting the $default$ flag,
all original data values that can't be mapped will be mapped
to the default value.
:param data: dict{year:DataFrame} of survey data
:param scales:
:param q_dict:
:return:
"""
obs_map = {}
for y in data.keys():
for q in q_dict.keys():
obs_map[q_dict[q]['name_{}'.format(y)]] = DataSetCreator._get_q_map(q, q_dict, scales)
for y in data.keys():
for c in data[y].columns:
if c in obs_map.keys():
observations = data[y][c]
scale = q_dict[c] if c in q_dict.keys() else None
scale_map = obs_map[c]
default_map = {}
# if a default value is set,
# replace all original values for which no mapping exists
# with that default mapping value
if "$default$" in scale_map.keys():
# all values in the data that do not appear in map:
unmapped = set(observations.unique()) - set(scale_map.keys()) - {'', np.nan, 'nan'}
default_map = {str(k): scale_map["$default$"] for k in unmapped}
# replace values with mapped + default values
data[y][c] = observations.astype(str).replace({
**scale_map,
**default_map,
**{
'nan': np.nan,
'': np.nan
}
})
# if scale is in defined as numeric, cast column to numeric
# if c in q_dict.keys() and q_dict[c]['format'] == 'numeric':
if scale is not None and scale['format'] == 'numeric':
data[y][c] = pd.to_numeric(observations, errors='coerce')
return data
def _make_panel_df(self):
"""
Create panel DataFrame by stacking yearly data, adding year variable, and identifying
persons and panel entities by <year, email> information.
:return: panel (Pandas DataFrame), number of available questions per year (dict{year:num})
"""
panel_data = []
available_questions_per_year = {}
# iterate over columns (questions)
for y, d in self.data.items():
qs = {q_info['name_{}'.format(y)]: q_panel for q_panel, q_info in self.q_dict.items()}
found_qs = [k for k in qs.keys() if k in d.columns]
not_found_qs = list(set(qs.keys()) - set(found_qs))
available_questions_per_year[y] = [qs[f] for f in found_qs]
for _ in d.iterrows():
row = _[1]
panel_data.append({
**{'year': y},
**{qs[q]: row[q] for q in found_qs},
**{qs[nf_q]: np.nan for nf_q in not_found_qs}
})
# identify panel entities
panel_df = pd.DataFrame(panel_data).replace({'nan': np.nan})
# t = time.process_time()
ident = panel_df.apply(
lambda x: self.identify(x['email'], x['year']), axis=1).apply(pd.Series)
# logger.info("identifying took {} s".format(time.process_time() - t))
panel_df['panel_entity_id'] = ident['panel_entity_id']
panel_df['person_id'] = ident['person_id']
return panel_df, available_questions_per_year
def get_datasets(self, years=None):
if years is None:
years = self.years
datasets = {}
if len(years) == 1:
datasets = {
'{}'.format(years[0]): self.make_dataset(self.panel_df, years)
}
elif len(years) > 1:
datasets = {
'panel {}-{}'.format(years[0], years[-1]): self.make_dataset(self.panel_df, years),
**{'{}'.format(y): self.make_dataset(self.panel_df, [y]) for y in years}
}
return datasets
def to_excel(self):
# prepare datasets
logger.info("Preparing all datasets...")
datasets = self.get_datasets()
fn = self.settings['path_output'].format(min(self.years), max(self.years))
filename = '{}_{}.xlsx'.format(fn, time.strftime("%Y%m%d-%H%M%S"))
writer = pd.ExcelWriter(filename, engine='xlsxwriter')
info_years = "the years {} to {}".format(
min(self.years), max(self.years)) if len(self.years) > 1 else "{}".format(self.years[0])
info = [
"German Standardization Panel",
"Deutsches Normungspanel (DNP)",
"",
"This data set contains pseudonymized survey data from " + info_years + ".",
"It was created automatically on " + time.strftime("%Y-%m-%d") + ".",
"The data structure is described in the sheets 'variables' and 'scales'.",
"Observations:"
]
for n, dat in datasets.items():
c_text = "Sheet {}: Selected observations: {}. Total: {}.".format(
n,
len(dat[dat['selected'] == True].index),
len(dat.index)
)
if "-" in n:
c_text += " Panel observations: "
selected = dat[dat['selected'] == True]
try:
selected_grouped = selected.groupby('panel_entity_id').agg({'year': pd.Series.nunique})
yearly = selected_grouped['year'].value_counts().to_dict()
c_text += ". ".join(["{} year(s): {}".format(y, c) for y, c in yearly.items()])
except KeyError:
logger.error(selected.groupby('panel_entity_id'))
info.append(c_text)
# write questions and scales
questions_drop_cols = set(self.questions.columns) - {
'name', 'question', 'label', 'scale', 'format'
}
scales_drop_cols = set(self.scales.obj.columns) - {'scale', 'value'}
# sheet 'info'
pd.DataFrame({'info': info}).to_excel(writer, sheet_name='info')
# sheet 'variables'
self.questions.drop(questions_drop_cols, axis=1).to_excel(writer, sheet_name='variables')
# sheet 'scales'
self.scales.obj.drop(scales_drop_cols, axis=1).to_excel(writer, sheet_name='scales')
# sheets yyyy
for n, dat in datasets.items():
dat.to_excel(writer, sheet_name=n)
# stack all years and put them in one sheet
years_dat = [d for y, d in datasets.items() if y.isdigit()]
# sheet 'data'
| pd.concat(years_dat, sort=False) | pandas.concat |
"""Amazon Neptune Module."""
import logging
import re
from typing import Any
import pandas as pd
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.translator import Translator
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.structure.graph import Graph
from awswrangler import exceptions
from awswrangler.neptune.client import NeptuneClient
_logger: logging.Logger = logging.getLogger(__name__)
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a Gremlin traversal as pandas dataframe.
Parameters
----------
client : neptune.Client
instance of the neptune client to use
traversal : str
The gremlin traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a Gremlin Query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")
"""
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a openCypher traversal as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The openCypher query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run an openCypher query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")
"""
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a SPARQL query as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The SPARQL traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a SPARQL query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE {
?person foaf:name ?name .
"""
data = client.read_sparql(query)
df = None
if "results" in data and "bindings" in data["results"]:
df = pd.DataFrame(data["results"]["bindings"])
df.applymap(lambda x: x["value"])
else:
df = pd.DataFrame(data)
return df
def to_property_graph(
client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
If writing to a property graph then DataFrames for vertices and edges must be written separately.
DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
If the ~id column does not exist, the specified id does not exists, or is empty then a new vertex will be added.
If no ~label column exists an exception will be thrown.
DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist
the specified id does not exists, or is empty then a new edge will be added. If no ~label, ~to, or ~from column
exists an exception will be thrown.
If you would like to save data using `single` cardinality then you can postfix (single) to the column header and
set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property
as single
cardinality. You can disable this by setting by setting `use_header_cardinality=False`.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size: int
The number of rows to save at a time. Default 50
use_header_cardinality: bool
If True, then the header cardinality will be used to save the data. Default True
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_property_graph(
... df=df
... )
"""
# check if ~id and ~label column exist and if not throw error
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if "~id" in df.columns:
if "~label" in df.columns:
is_update_df = False
if "~to" in df.columns and "~from" in df.columns:
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue(
"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune"
)
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
# run the query
if index > 0 and index % batch_size == 0:
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g)
def to_rdf_graph(
client: NeptuneClient,
df: pd.DataFrame,
batch_size: int = 50,
subject_column: str = "s",
predicate_column: str = "p",
object_column: str = "o",
graph_column: str = "g",
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
If you want to add data into a named graph then you will also need the graph column.
Parameters
----------
client (NeptuneClient) :
instance of the neptune client to use
df (pandas.DataFrame) :
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
subject_column (str, optional) :
The column name in the dataframe for the subject. Defaults to 's'
predicate_column (str, optional) :
The column name in the dataframe for the predicate. Defaults to 'p'
object_column (str, optional) :
The column name in the dataframe for the object. Defaults to 'o'
graph_column (str, optional) :
The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_rdf_graph(
... df=df
... )
"""
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if graph_column in df.columns:
is_quads = True
else:
raise exceptions.InvalidArgumentValue(
"""Dataframe must contain at least the subject, predicate, and object columns defined or the defaults
(s, p, o) to be saved to Amazon Neptune"""
)
query = ""
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_quads:
insert = f"""INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
<{str(row[predicate_column])}> <{row[object_column]}> . }} }}; """
query = query + insert
else:
insert = f"""INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
<{row[object_column]}> . }}; """
query = query + insert
# run the query
if index > 0 and index % batch_size == 0:
res = client.write_sparql(query)
if res:
query = ""
return client.write_sparql(query)
def connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:
"""Create a connection to a Neptune cluster.
Parameters
----------
host : str
The host endpoint to connect to
port : int
The port endpoint to connect to
iam_enabled : bool, optional
True if IAM is enabled on the cluster. Defaults to False.
Returns
-------
NeptuneClient
[description]
"""
return NeptuneClient(host, port, iam_enabled, **kwargs)
def _get_column_name(column: str) -> str:
if "(single)" in column.lower():
return re.compile(r"\(single\)", re.IGNORECASE).sub("", column)
return column
def _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:
for (column, value) in row.items():
if column not in ["~id", "~label", "~to", "~from"]:
# If the column header is specifying the cardinality then use it
if use_header_cardinality:
if column.lower().find("(single)") > 0 and pd.notna(value):
g = g.property(Cardinality.single, _get_column_name(column), value)
else:
g = _expand_properties(g, _get_column_name(column), value)
else:
# If not using header cardinality then use the default of set
g = _expand_properties(g, column, value)
return g
def _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:
# If this is a list then expand it out into multiple property calls
if isinstance(value, list) and len(value) > 0:
for item in value:
g = g.property(Cardinality.set_, column, item)
elif pd.notna(value):
g = g.property(Cardinality.set_, column, value)
return g
def _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:
g = g.V(str(row["~id"]))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_vertices(
g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False
) -> GraphTraversalSource:
g = g.V(str(row["~id"])).fold().coalesce(__.unfold(), __.addV(row["~label"]).property(T.id, str(row["~id"])))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_edges(
g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool
) -> GraphTraversalSource:
g = (
g.V(str(row["~from"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~from"], "~label": "Vertex"}))
.addE(row["~label"])
.property(T.id, str(row["~id"]))
.to(
__.V(str(row["~to"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~to"], "~label": "Vertex"}))
)
)
g = _set_properties(g, use_header_cardinality, row)
return g
def _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:
translator = Translator("g")
s = translator.translate(g.bytecode)
s = s.replace("Cardinality.", "") # hack to fix parser error for set cardinality
_logger.debug(s)
res = client.write_gremlin(s)
return res
def flatten_nested_df(
df: pd.DataFrame, include_prefix: bool = True, seperator: str = "_", recursive: bool = True
) -> pd.DataFrame:
"""Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
The seperator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame
"""
if seperator is None:
seperator = "_"
df = df.reset_index()
# search for list and map
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if len(list_columns) > 0 or len(dict_columns) > 0:
new_columns = []
for col in dict_columns:
# expand dictionaries horizontally
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{col}{seperator}")
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{seperator}")
expanded.index = df.index
df = | pd.concat([df, expanded], axis=1) | pandas.concat |
import io
import os
import re
import sys
import time
import pandas
import datetime
import requests
import mplfinance
from matplotlib import dates
# Basic Data
file_name = __file__[:-3]
absolute_path = os.path.dirname(os.path.abspath(__file__))
# <editor-fold desc='common'>
def load_json_config():
global file_directory
config_file = os.path.join(os.sep, absolute_path, 'Config.cfg')
with open(config_file, 'r') as file_handler:
config_data = file_handler.read()
regex = 'FILE_DIRECTORY=.*'
match = re.findall(regex, config_data)
file_directory = match[0].split('=')[1].strip()
# </editor-fold>
# <editor-fold desc='daily update'>
def save_dict_to_file(dic, txt):
    """Write an already-stringified object `dic` (e.g. str(some_dict)) to the file `txt`."""
f = open(txt, 'w', encoding='utf-8')
f.write(dic)
f.close()
def load_dict_from_file(txt):
    """Read `txt` and eval() its contents back into a Python object; only use on trusted files."""
f = open(txt, 'r', encoding='utf-8')
data = f.read()
f.close()
return eval(data)
def crawl_price(date=datetime.datetime.now()):
    """Fetch the TWSE daily quotes (MI_INDEX, type=ALL) for `date` as a DataFrame indexed
    by security code ('證券代號'); columns keep the TWSE headers, e.g. 成交股數 (shares
    traded), 成交金額 (trade value), 開盤價/收盤價 (open/close), 最高價/最低價 (high/low).
    """
date_str = str(date).split(' ')[0].replace('-', '')
r = requests.post('http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + date_str + '&type=ALL')
ret = pandas.read_csv(io.StringIO('\n'.join([i.translate({ord(c): None for c in ' '}) for i in r.text.split('\n') if
len(i.split(',')) == 17 and i[0] != '='])), header=0,
index_col='證券代號')
ret['成交金額'] = ret['成交金額'].str.replace(',', '')
ret['成交股數'] = ret['成交股數'].str.replace(',', '')
return ret
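# Usage sketch (added for illustration; not called anywhere in this script). The date
# and the security code below are assumed examples rather than values from the original.
def _example_fetch_single_day():
    quotes = crawl_price(datetime.datetime(2019, 1, 2))  # an assumed trading day
    # rows are indexed by security code; columns keep the TWSE header names
    return quotes.loc['2330', '收盤價']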
def original_crawl_price(date='2011-01-01 00:00:00'):
    """Build the initial workbook: find the first trading day on or after `date` and write
    its volume/open/close/high/low data as separate sheets of stock_file_path.
    """
print('Begin: original_crawl_price!')
data = {}
success = False
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(date, dateFormatter)
while not success:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success!')
success = True
except pandas.errors.EmptyDataError:
            # market holidays return no data, so the CSV parse raises EmptyDataError
            print('fail! check the date is holiday')
        # move on to the next day
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume.index = pandas.to_datetime(stock_volume.index)
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open.index = pandas.to_datetime(stock_open.index)
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close.index = pandas.to_datetime(stock_close.index)
stock_close.to_excel(writer, sheet_name='stock_close', index=True)
stock_high = pandas.DataFrame({k: d['最高價'] for k, d in data.items()}).transpose()
stock_high.index = pandas.to_datetime(stock_high.index)
stock_high.to_excel(writer, sheet_name='stock_high', index=True)
stock_low = pandas.DataFrame({k: d['最低價'] for k, d in data.items()}).transpose()
stock_low.index = pandas.to_datetime(stock_low.index)
stock_low.to_excel(writer, sheet_name='stock_low', index=True)
writer.save()
print('End: original_crawl_price!')
def update_stock_info():
    """Append newly available trading days (at most 100 successful fetches per run) to the
    workbook at stock_file_path, bootstrapping it via original_crawl_price() if missing.
    """
print('Begin: update_stock_info!')
data = {}
count = 1
fail_count = 0
allow_continuous_fail_count = 20
try:
pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
print(r'{} Exist.'.format(stock_file_path))
except FileNotFoundError:
print(r'{} Not Exist.'.format(stock_file_path))
original_crawl_price()
stock_volume_old = pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
stock_volume_old.index = pandas.to_datetime(stock_volume_old.index)
stock_open_old = pandas.read_excel(stock_file_path, sheet_name='stock_open', index_col=0)
stock_open_old.index = pandas.to_datetime(stock_open_old.index)
stock_close_old = pandas.read_excel(stock_file_path, sheet_name='stock_close', index_col=0)
stock_close_old.index = pandas.to_datetime(stock_close_old.index)
stock_high_old = pandas.read_excel(stock_file_path, sheet_name='stock_high', index_col=0)
stock_high_old.index = pandas.to_datetime(stock_high_old.index)
stock_low_old = pandas.read_excel(stock_file_path, sheet_name='stock_low', index_col=0)
stock_low_old.index = pandas.to_datetime(stock_low_old.index)
last_date = stock_volume_old.index[-1]
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(str(last_date), dateFormatter)
date += datetime.timedelta(days=1)
if date > datetime.datetime.now():
print('Finish update_stock_info!')
sys.exit(0)
while date < datetime.datetime.now() and count <= 100:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success {} times!'.format(count))
fail_count = 0
count += 1
except pandas.errors.EmptyDataError:
# 假日爬不到
print('fail! check the date is holiday')
fail_count += 1
if fail_count == allow_continuous_fail_count:
raise
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume_new = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume_new.index = pandas.to_datetime(stock_volume_new.index)
stock_volume = pandas.concat([stock_volume_old, stock_volume_new], join='outer')
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open_new = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open_new.index = pandas.to_datetime(stock_open_new.index)
stock_open = | pandas.concat([stock_open_old, stock_open_new], join='outer') | pandas.concat |
#!/usr/bin/env python
# Author: <NAME> (jsh) [<EMAIL>]
import itertools
import joblib
import logging
import os.path
import pathlib
import random
import shutil
import sys
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from sklearn import preprocessing as skpreproc
from keras.layers import Dense
from keras.models import Sequential
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
np.set_printoptions(precision=4, suppress=True)
_CODEDIR = pathlib.Path(__file__).parent
MODELDIR = _CODEDIR / 'model'
MODELFILE = MODELDIR / 'model.d5'.format(**locals())
XS_FILE = MODELDIR / 'xscaler.dump'
YS_FILE = MODELDIR / 'yscaler.dump'
_EPOCHS = 30
_BATCH_SIZE = 32
def _build_linear_model(num_features):
model = Sequential()
model.add(Dense(1, input_dim=num_features, activation='linear'))
model.compile(loss='mse', metrics=['mse'], optimizer='adam')
return model
def _expand_dummies(frame):
categories = dict()
bases = ['A', 'C', 'G', 'T']
idxs = [x for x in range(20)] # Magic number because guidelen is fixed.
pairs = [''.join(pair) for pair in itertools.product(bases, bases)]
categories['mm_idx'] = idxs
categories['mm_trans'] = pairs
widecols = list()
for column in frame.columns:
if column not in categories:
continue
frame[column] = frame[column].astype(CategoricalDtype(categories[column]))
return pd.get_dummies(frame)
def _get_linear_encoder():
def encoder(inrow):
vari = inrow.variant
orig = inrow.original
mm_idx = None
for i in range(len(vari)):
if vari[i] != orig[i]:
if orig != vari[:i] + orig[i] + vari[i+1:]:
template = 'too many mismatches in pair {vari} <- {orig}'
raise ValueError(template.format(**locals()))
mm_idx = i
if mm_idx == None:
template = 'no mismatch in pair {vari} <- {orig}'
raise ValueError(template.format(**locals()))
features = dict()
features['mm_idx'] = mm_idx
mm_trans = ''.join([orig[mm_idx], vari[mm_idx]])
features['mm_trans'] = mm_trans
features['gc_cont'] = orig.count('G') + orig.count('C')
row = pd.Series(features)
return row
return encoder
def train_and_save_mismatch_model(voframe, yframe):
if voframe.shape[0] != yframe.shape[0]:
logging.fatal('voframe and training values had different length')
sys.exit(2)
if 'variant' not in voframe.columns or 'original' not in voframe.columns:
logging.fatal('voframe missing variant and/or original')
sys.exit(2)
encoder = _get_linear_encoder()
encodings = voframe.apply(encoder, axis=1)
Xframe = encodings.set_index(voframe.variant)
Xframe = _expand_dummies(Xframe)
X = np.array(Xframe, dtype=float)
y = np.array(yframe.y, dtype=float).reshape(-1, 1)
shutil.rmtree(MODELDIR, ignore_errors=True)
while os.path.exists(MODELDIR):
continue
MODELDIR.mkdir(parents=True, exist_ok=True)
y_orig = y
X_scaler = skpreproc.StandardScaler()
X = X_scaler.fit_transform(X)
y_scaler = skpreproc.StandardScaler()
y = y_scaler.fit_transform(y)
model = _build_linear_model(X.shape[1])
# Feed training Data
model.fit(X, y, epochs=_EPOCHS, batch_size=_BATCH_SIZE)
joblib.dump(X_scaler, XS_FILE)
joblib.dump(y_scaler, YS_FILE)
joblib.dump(model, MODELFILE)
def _retrieve_mismatch_model():
try:
return (joblib.load(MODELFILE), joblib.load(XS_FILE), joblib.load(YS_FILE))
except FileNotFoundError:
logging.fatal('Tried to make predictions without a model in place')
sys.exit(2)
def predict_mismatch_scores(reference):
if 'variant' not in reference.columns or 'original' not in reference.columns:
logging.fatal('reference missing variant and/or original')
sys.exit(2)
refsize = len(reference)
logging.info('Applying model to {refsize} guides...'.format(**locals()))
model, xscaler, yscaler = _retrieve_mismatch_model()
encoder = _get_linear_encoder()
voframe = reference[['variant', 'original']]
voframe = voframe.drop_duplicates()
matchmask = (voframe.variant == voframe.original)
parents = | pd.DataFrame(voframe.loc[matchmask]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 17:13:29 2018
@author: pamelaanderson
"""
from difflib import SequenceMatcher
import json
import numpy as np
import os
import operator
import pandas as pd
def load_adverse_events(path, year, q):
""" Loading adverse drug events while performing basic pre-processing"""
path_w_year = path + year + '/' + q + '/'
json_files = os.listdir(path_w_year)
df_adverse_ev = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPyLib: Quantitative Trading Python Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import atexit
import json
import logging
import os
import pickle
import sys
import tempfile
import time
import glob
import subprocess
from datetime import datetime
from abc import ABCMeta
import zmq
import pandas as pd
from dateutil.parser import parse as parse_date
import pymysql
from pymysql.constants.CLIENT import MULTI_STATEMENTS
from numpy import (
isnan as np_isnan,
nan as np_nan,
int64 as np_int64
)
from ezibpy import (
ezIBpy, dataTypes as ibDataTypes
)
from qtpylib import (
tools, asynctools, path, futures, __version__
)
# =============================================
# check min, python version
if sys.version_info < (3, 4):
raise SystemError("QTPyLib requires Python version >= 3.4")
# =============================================
# Configure logging
tools.createLogger(__name__, logging.INFO)
# Disable ezIBpy logging (Blotter handles errors itself)
logging.getLogger('ezibpy').setLevel(logging.CRITICAL)
# =============================================
# set up threading pool
__threads__ = tools.read_single_argv("--threads")
__threads__ = __threads__ if tools.is_number(__threads__) else None
asynctools.multitasking.createPool(__name__, __threads__)
# =============================================
cash_ticks = {}
class Blotter():
"""Broker class initilizer
:Optional:
name : string
name of the blotter (used by other modules)
symbols : str
IB contracts CSV database (default: ./symbols.csv)
ibport : int
TWS/GW Port to use (default: 4001)
ibclient : int
TWS/GW Client ID (default: 999)
ibserver : str
IB TWS/GW Server hostname (default: localhost)
zmqport : str
ZeroMQ Port to use (default: 12345)
zmqtopic : str
ZeroMQ string to use (default: _qtpylib_BLOTTERNAME_)
orderbook : str
Get Order Book (Market Depth) data (default: False)
dbhost : str
MySQL server hostname (default: localhost)
dbport : str
MySQL server port (default: 3306)
dbname : str
MySQL server database (default: qpy)
dbuser : str
MySQL server username (default: root)
dbpass : str
MySQL server password (default: none)
dbskip : str
Skip MySQL logging (default: False)
"""
__metaclass__ = ABCMeta
def __init__(self, name=None, symbols="symbols.csv",
ibport=4001, ibclient=999, ibserver="localhost",
dbhost="localhost", dbport="3306", dbname="qtpy",
dbuser="root", dbpass="", dbskip=False, orderbook=False,
zmqport="12345", zmqtopic=None, **kwargs):
# whats my name?
self.name = str(self.__class__).split('.')[-1].split("'")[0].lower()
if name is not None:
self.name = name
# initilize class logger
self.log_blotter = logging.getLogger(__name__)
# do not act on first tick (timezone is incorrect)
self.first_tick = True
self._bars = pd.DataFrame(
columns=['open', 'high', 'low', 'close', 'volume'])
self._bars.index.names = ['datetime']
self._bars.index = pd.to_datetime(self._bars.index, utc=True)
# self._bars.index = self._bars.index.tz_convert(settings['timezone'])
self._bars = {"~": self._bars}
self._raw_bars = pd.DataFrame(columns=['last', 'volume'])
self._raw_bars.index.names = ['datetime']
self._raw_bars.index = pd.to_datetime(self._raw_bars.index, utc=True)
self._raw_bars = {"~": self._raw_bars}
# global objects
self.dbcurr = None
self.dbconn = None
self.context = None
self.socket = None
self.ibConn = None
self.symbol_ids = {} # cache
self.cash_ticks = cash_ticks # outside cache
self.rtvolume = set() # has RTVOLUME?
# -------------------------------
# work default values
# -------------------------------
if zmqtopic is None:
zmqtopic = "_qtpylib_" + str(self.name.lower()) + "_"
# if no path given for symbols' csv, use same dir
if symbols == "symbols.csv":
symbols = path['caller'] + '/' + symbols
# -------------------------------
# override args with any (non-default) command-line args
self.args = {arg: val for arg, val in locals().items(
) if arg not in ('__class__', 'self', 'kwargs')}
self.args.update(kwargs)
self.args.update(self.load_cli_args())
# read cached args to detect duplicate blotters
self.duplicate_run = False
self.cahced_args = {}
self.args_cache_file = "%s/%s.qtpylib" % (
tempfile.gettempdir(), self.name)
if os.path.exists(self.args_cache_file):
self.cahced_args = self._read_cached_args()
# don't display connection errors on ctrl+c
self.quitting = False
# do stuff on exit
atexit.register(self._on_exit)
# track historical data download status
self.backfilled = False
self.backfilled_symbols = []
self.backfill_resolution = "1 min"
# be aware of thread count
self.threads = asynctools.multitasking.getPool(__name__)['threads']
# -------------------------------------------
def _on_exit(self, terminate=True):
if "as_client" in self.args:
return
self.log_blotter.info("Blotter stopped...")
if self.ibConn is not None:
self.log_blotter.info("Cancel market data...")
self.ibConn.cancelMarketData()
self.log_blotter.info("Disconnecting...")
self.ibConn.disconnect()
if not self.duplicate_run:
self.log_blotter.info("Deleting runtime args...")
self._remove_cached_args()
if not self.args['dbskip']:
self.log_blotter.info("Disconnecting from MySQL...")
try:
self.dbcurr.close()
self.dbconn.close()
except Exception as e:
pass
if terminate:
os._exit(0)
# -------------------------------------------
@staticmethod
def _detect_running_blotter(name):
return name
# -------------------------------------------
@staticmethod
def _blotter_file_running():
try:
# not sure how this works on windows...
command = 'pgrep -f ' + sys.argv[0]
process = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE)
stdout_list = process.communicate()[0].decode('utf-8').split("\n")
stdout_list = list(filter(None, stdout_list))
return len(stdout_list) > 0
except Exception as e:
return False
# -------------------------------------------
def _check_unique_blotter(self):
if os.path.exists(self.args_cache_file):
# temp file found - check if really running
# or if this file wasn't deleted due to crash
if not self._blotter_file_running():
# print("REMOVING OLD TEMP")
self._remove_cached_args()
else:
self.duplicate_run = True
self.log_blotter.error("Blotter is already running...")
sys.exit(1)
self._write_cached_args()
# -------------------------------------------
def _remove_cached_args(self):
if os.path.exists(self.args_cache_file):
os.remove(self.args_cache_file)
def _read_cached_args(self):
if os.path.exists(self.args_cache_file):
return pickle.load(open(self.args_cache_file, "rb"))
return {}
def _write_cached_args(self):
pickle.dump(self.args, open(self.args_cache_file, "wb"))
tools.chmod(self.args_cache_file)
# -------------------------------------------
def load_cli_args(self):
parser = argparse.ArgumentParser(
description='QTPyLib Blotter',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--symbols', default=self.args['symbols'],
help='IB contracts CSV database', required=False)
parser.add_argument('--ibport', default=self.args['ibport'],
help='TWS/GW Port to use', required=False)
parser.add_argument('--ibclient', default=self.args['ibclient'],
help='TWS/GW Client ID', required=False)
parser.add_argument('--ibserver', default=self.args['ibserver'],
help='IB TWS/GW Server hostname', required=False)
parser.add_argument('--zmqport', default=self.args['zmqport'],
help='ZeroMQ Port to use', required=False)
parser.add_argument('--orderbook', action='store_true',
help='Get Order Book (Market Depth) data',
required=False)
parser.add_argument('--dbhost', default=self.args['dbhost'],
help='MySQL server hostname', required=False)
parser.add_argument('--dbport', default=self.args['dbport'],
help='MySQL server port', required=False)
parser.add_argument('--dbname', default=self.args['dbname'],
help='MySQL server database', required=False)
parser.add_argument('--dbuser', default=self.args['dbuser'],
help='MySQL server username', required=False)
parser.add_argument('--dbpass', default=self.args['dbpass'],
help='MySQL server password', required=False)
parser.add_argument('--dbskip', default=self.args['dbskip'],
required=False, help='Skip MySQL logging (flag)',
action='store_true')
# only return non-default cmd line args
# (meaning only those actually given)
cmd_args, _ = parser.parse_known_args()
args = {arg: val for arg, val in vars(
cmd_args).items() if val != parser.get_default(arg)}
return args
# -------------------------------------------
def ibCallback(self, caller, msg, **kwargs):
if caller == "handleConnectionClosed":
self.log_blotter.info("Lost conncetion to Interactive Brokers...")
self._on_exit(terminate=False)
self.run()
elif caller == "handleHistoricalData":
self.on_ohlc_received(msg, kwargs)
elif caller == "handleTickString":
self.on_tick_string_received(msg.tickerId, kwargs)
elif caller == "handleTickPrice" or caller == "handleTickSize":
self.on_quote_received(msg.tickerId)
elif caller in "handleTickOptionComputation":
self.on_option_computation_received(msg.tickerId)
elif caller == "handleMarketDepth":
self.on_orderbook_received(msg.tickerId)
elif caller == "handleError":
# don't display connection errors on ctrl+c
if self.quitting and \
msg.errorCode in ibDataTypes["DISCONNECT_ERROR_CODES"]:
return
# errorCode can be None...
if 1100 <= msg.errorCode < 2200 or msg.errorCode == 0:
self.log_blotter.warning(
'[IB #%d] %s', msg.errorCode, msg.errorMsg)
elif msg.errorCode not in (502, 504): # connection error
self.log_blotter.error(
'[IB #%d] %s', msg.errorCode, msg.errorMsg)
# -------------------------------------------
def on_ohlc_received(self, msg, kwargs):
symbol = self.ibConn.tickerSymbol(msg.reqId)
if kwargs["completed"]:
self.backfilled_symbols.append(symbol)
tickers = set(
{v: k for k, v in self.ibConn.tickerIds.items() if v.upper() != "SYMBOL"}.keys())
if tickers == set(self.backfilled_symbols):
self.backfilled = True
print(".")
try:
self.ibConn.cancelHistoricalData(
self.ibConn.contracts[msg.reqId])
except Exception as e:
pass
else:
data = {
"symbol": symbol,
"symbol_group": tools.gen_symbol_group(symbol),
"asset_class": tools.gen_asset_class(symbol),
"timestamp": tools.datetime_to_timezone(
datetime.fromtimestamp(int(msg.date)), tz="UTC"
).strftime("%Y-%m-%d %H:%M:%S"),
}
# incmoing second data
if "sec" in self.backfill_resolution:
data["last"] = tools.to_decimal(msg.close)
data["lastsize"] = int(msg.volume) # msg.count?
data["bid"] = 0
data["bidsize"] = 0
data["ask"] = 0
data["asksize"] = 0
data["kind"] = "TICK"
else:
data["open"] = tools.to_decimal(msg.open)
data["high"] = tools.to_decimal(msg.high)
data["low"] = tools.to_decimal(msg.low)
data["close"] = tools.to_decimal(msg.close)
data["volume"] = int(msg.volume)
data["kind"] = "BAR"
# print(data)
# store in db
self.log2db(data, data["kind"])
# -------------------------------------------
@asynctools.multitasking.task
def on_tick_string_received(self, tickerId, kwargs):
# kwargs is empty
if not kwargs:
return
data = None
symbol = self.ibConn.tickerSymbol(tickerId)
# for instruments that receive RTVOLUME events
if "tick" in kwargs:
self.rtvolume.add(symbol)
data = {
# available data from ib
"symbol": symbol,
"symbol_group": tools.gen_symbol_group(symbol), # ES_F, ...
"asset_class": tools.gen_asset_class(symbol),
"timestamp": kwargs['tick']['time'],
"last": tools.to_decimal(kwargs['tick']['last']),
"lastsize": int(kwargs['tick']['size']),
"bid": tools.to_decimal(kwargs['tick']['bid']),
"ask": tools.to_decimal(kwargs['tick']['ask']),
"bidsize": int(kwargs['tick']['bidsize']),
"asksize": int(kwargs['tick']['asksize']),
# "wap": kwargs['tick']['wap'],
}
# for instruments that DOESN'T receive RTVOLUME events (exclude options)
elif symbol not in self.rtvolume and \
self.ibConn.contracts[tickerId].m_secType not in ("OPT", "FOP"):
tick = self.ibConn.marketData[tickerId]
if not tick.empty and tick['last'].values[-1] > 0 < tick['lastsize'].values[-1]:
data = {
# available data from ib
"symbol": symbol,
# ES_F, ...
"symbol_group": tools.gen_symbol_group(symbol),
"asset_class": tools.gen_asset_class(symbol),
"timestamp": tick.index.values[-1],
"last": tools.to_decimal(tick['last'].values[-1]),
"lastsize": int(tick['lastsize'].values[-1]),
"bid": tools.to_decimal(tick['bid'].values[-1]),
"ask": tools.to_decimal(tick['ask'].values[-1]),
"bidsize": int(tick['bidsize'].values[-1]),
"asksize": int(tick['asksize'].values[-1]),
# "wap": kwargs['tick']['wap'],
}
# proceed if data exists
if data is not None:
# cache last tick
if symbol in self.cash_ticks.keys():
if data == self.cash_ticks[symbol]:
return
self.cash_ticks[symbol] = data
# add options fields
data = tools.force_options_columns(data)
# print('.', end="", flush=True)
self.on_tick_received(data)
# -------------------------------------------
@asynctools.multitasking.task
def on_quote_received(self, tickerId):
try:
symbol = self.ibConn.tickerSymbol(tickerId)
if self.ibConn.contracts[tickerId].m_secType in ("OPT", "FOP"):
quote = self.ibConn.optionsData[tickerId].to_dict(orient='records')[
0]
quote['type'] = self.ibConn.contracts[tickerId].m_right
quote['strike'] = tools.to_decimal(
self.ibConn.contracts[tickerId].m_strike)
quote["symbol_group"] = self.ibConn.contracts[tickerId].m_symbol + \
'_' + self.ibConn.contracts[tickerId].m_secType
quote = tools.mark_options_values(quote)
else:
quote = self.ibConn.marketData[tickerId].to_dict(orient='records')[
0]
quote["symbol_group"] = tools.gen_symbol_group(symbol)
quote["symbol"] = symbol
quote["asset_class"] = tools.gen_asset_class(symbol)
quote['bid'] = tools.to_decimal(quote['bid'])
quote['ask'] = tools.to_decimal(quote['ask'])
quote['last'] = tools.to_decimal(quote['last'])
quote["kind"] = "QUOTE"
# cash markets do not get RTVOLUME (handleTickString)
if quote["asset_class"] == "CSH":
quote['last'] = round(
float((quote['bid'] + quote['ask']) / 2), 5)
quote['timestamp'] = datetime.utcnow(
).strftime("%Y-%m-%d %H:%M:%S.%f")
# create synthetic tick
if symbol in self.cash_ticks.keys() and quote['last'] != self.cash_ticks[symbol]:
self.on_tick_received(quote)
else:
self.broadcast(quote, "QUOTE")
self.cash_ticks[symbol] = quote['last']
else:
self.broadcast(quote, "QUOTE")
except Exception as e:
pass
# -------------------------------------------
@asynctools.multitasking.task
def on_option_computation_received(self, tickerId):
# try:
symbol = self.ibConn.tickerSymbol(tickerId)
tick = self.ibConn.optionsData[tickerId].to_dict(orient='records')[0]
# must have values!
for key in ('bid', 'ask', 'last', 'bidsize', 'asksize', 'lastsize',
'volume', 'delta', 'gamma', 'vega', 'theta'):
if tick[key] == 0:
return
tick['type'] = self.ibConn.contracts[tickerId].m_right
tick['strike'] = tools.to_decimal(
self.ibConn.contracts[tickerId].m_strike)
tick["symbol_group"] = self.ibConn.contracts[tickerId].m_symbol + \
'_' + self.ibConn.contracts[tickerId].m_secType
tick['volume'] = int(tick['volume'])
tick['bid'] = tools.to_decimal(tick['bid'])
tick['bidsize'] = int(tick['bidsize'])
tick['ask'] = tools.to_decimal(tick['ask'])
tick['asksize'] = int(tick['asksize'])
tick['last'] = tools.to_decimal(tick['last'])
tick['lastsize'] = int(tick['lastsize'])
tick['price'] = tools.to_decimal(tick['price'], 2)
tick['underlying'] = tools.to_decimal(tick['underlying'])
tick['dividend'] = tools.to_decimal(tick['dividend'])
tick['volume'] = int(tick['volume'])
tick['iv'] = tools.to_decimal(tick['iv'])
tick['oi'] = int(tick['oi'])
tick['delta'] = tools.to_decimal(tick['delta'])
tick['gamma'] = tools.to_decimal(tick['gamma'])
tick['vega'] = tools.to_decimal(tick['vega'])
tick['theta'] = tools.to_decimal(tick['theta'])
tick["symbol"] = symbol
tick["symbol_group"] = tools.gen_symbol_group(symbol)
tick["asset_class"] = tools.gen_asset_class(symbol)
tick = tools.mark_options_values(tick)
# is this a really new tick?
prev_last = 0
prev_lastsize = 0
if symbol in self.cash_ticks.keys():
prev_last = self.cash_ticks[symbol]['last']
prev_lastsize = self.cash_ticks[symbol]['lastsize']
if tick == self.cash_ticks[symbol]:
return
self.cash_ticks[symbol] = dict(tick)
# assign timestamp
tick['timestamp'] = self.ibConn.optionsData[tickerId].index[0]
if tick['timestamp'] == 0:
tick['timestamp'] = datetime.utcnow().strftime(
ibDataTypes['DATE_TIME_FORMAT_LONG_MILLISECS'])
# treat as tick if last/volume changed
if tick['last'] != prev_last or tick['lastsize'] != prev_lastsize:
tick["kind"] = "TICK"
self.on_tick_received(tick)
# otherwise treat as quote
else:
tick["kind"] = "QUOTE"
self.broadcast(tick, "QUOTE")
# except Exception as e:
# pass
# -------------------------------------------
@asynctools.multitasking.task
def on_orderbook_received(self, tickerId):
orderbook = self.ibConn.marketDepthData[tickerId].dropna(
subset=['bid', 'ask']).fillna(0).to_dict(orient='list')
# add symbol data to list
symbol = self.ibConn.tickerSymbol(tickerId)
orderbook['symbol'] = symbol
orderbook["symbol_group"] = tools.gen_symbol_group(symbol)
orderbook["asset_class"] = tools.gen_asset_class(symbol)
orderbook["kind"] = "ORDERBOOK"
# broadcast
self.broadcast(orderbook, "ORDERBOOK")
# -------------------------------------------
@asynctools.multitasking.task
def on_tick_received(self, tick):
# data
symbol = tick['symbol']
timestamp = datetime.strptime(tick['timestamp'],
ibDataTypes["DATE_TIME_FORMAT_LONG_MILLISECS"])
# do not act on first tick (timezone is incorrect)
if self.first_tick:
self.first_tick = False
return
try:
timestamp = parse_date(timestamp)
except Exception as e:
pass
# placeholders
if symbol not in self._raw_bars:
self._raw_bars[symbol] = self._raw_bars['~']
if symbol not in self._bars:
self._bars[symbol] = self._bars['~']
# send tick to message self.broadcast
tick["kind"] = "TICK"
self.broadcast(tick, "TICK")
self.log2db(tick, "TICK")
# add tick to raw self._bars
tick_data = pd.DataFrame(index=['timestamp'],
data={'timestamp': timestamp,
'last': tick['last'],
'volume': tick['lastsize']})
tick_data.set_index(['timestamp'], inplace=True)
_raw_bars = self._raw_bars[symbol].copy()
_raw_bars = _raw_bars.append(tick_data)
# add tools.resampled raw to self._bars
ohlc = _raw_bars['last'].resample('1T').ohlc()
vol = _raw_bars['volume'].resample('1T').sum()
vol = _raw_bars['volume'].resample('1T').sum()
opened_bar = ohlc
opened_bar['volume'] = vol
# add bar to self._bars object
previous_bar_count = len(self._bars[symbol])
self._bars[symbol] = self._bars[symbol].append(opened_bar)
self._bars[symbol] = self._bars[symbol].groupby(
self._bars[symbol].index).last()
if len(self._bars[symbol].index) > previous_bar_count:
bar = self._bars[symbol].to_dict(orient='records')[0]
bar["symbol"] = symbol
bar["symbol_group"] = tick['symbol_group']
bar["asset_class"] = tick['asset_class']
bar["timestamp"] = self._bars[symbol].index[0].strftime(
ibDataTypes["DATE_TIME_FORMAT_LONG"])
bar["kind"] = "BAR"
self.broadcast(bar, "BAR")
self.log2db(bar, "BAR")
self._bars[symbol] = self._bars[symbol][-1:]
_raw_bars.drop(_raw_bars.index[:], inplace=True)
self._raw_bars[symbol] = _raw_bars
# -------------------------------------------
def broadcast(self, data, kind):
def int64_handler(o):
if isinstance(o, np_int64):
try:
return pd.to_datetime(o, unit='ms').strftime(
ibDataTypes["DATE_TIME_FORMAT_LONG"])
except Exception as e:
return int(o)
raise TypeError
string2send = "%s %s" % (
self.args["zmqtopic"], json.dumps(data, default=int64_handler))
# print(kind, string2send)
try:
self.socket.send_string(string2send)
except Exception as e:
pass
# -------------------------------------------
def log2db(self, data, kind):
if self.args['dbskip'] or len(data["symbol"].split("_")) > 2:
return
# connect to mysql per call (thread safe)
if self.threads > 0:
dbconn = self.get_mysql_connection()
dbcurr = dbconn.cursor()
else:
dbconn = self.dbconn
dbcurr = self.dbcurr
# set symbol details
symbol_id = 0
symbol = data["symbol"].replace("_" + data["asset_class"], "")
if symbol in self.symbol_ids.keys():
symbol_id = self.symbol_ids[symbol]
else:
symbol_id = get_symbol_id(
data["symbol"], dbconn, dbcurr, self.ibConn)
self.symbol_ids[symbol] = symbol_id
# insert to db
if kind == "TICK":
try:
mysql_insert_tick(data, symbol_id, dbcurr)
except Exception as e:
pass
elif kind == "BAR":
try:
mysql_insert_bar(data, symbol_id, dbcurr)
except Exception as e:
pass
# commit
try:
dbconn.commit()
except Exception as e:
pass
# disconect from mysql
if self.threads > 0:
dbcurr.close()
dbconn.close()
# -------------------------------------------
def run(self):
"""Starts the blotter
Connects to the TWS/GW, processes and logs market data,
and broadcast it over TCP via ZeroMQ (which algo subscribe to)
"""
self._check_unique_blotter()
# connect to mysql
self.mysql_connect()
self.context = zmq.Context(zmq.REP)
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:" + str(self.args['zmqport']))
db_modified = 0
contracts = []
prev_contracts = []
first_run = True
self.log_blotter.info("Connecting to Interactive Brokers...")
self.ibConn = ezIBpy()
self.ibConn.ibCallback = self.ibCallback
while not self.ibConn.connected:
self.ibConn.connect(clientId=int(self.args['ibclient']),
port=int(self.args['ibport']), host=str(self.args['ibserver']))
time.sleep(1)
if not self.ibConn.connected:
print('*', end="", flush=True)
self.log_blotter.info("Connection established...")
try:
while True:
if not os.path.exists(self.args['symbols']):
pd.DataFrame(columns=['symbol', 'sec_type', 'exchange',
'currency', 'expiry', 'strike', 'opt_type']
).to_csv(self.args['symbols'], header=True, index=False)
tools.chmod(self.args['symbols'])
else:
time.sleep(0.1)
# read db properties
db_data = os.stat(self.args['symbols'])
db_size = db_data.st_size
db_last_modified = db_data.st_mtime
# empty file
if db_size == 0:
if prev_contracts:
self.log_blotter.info('Cancel market data...')
self.ibConn.cancelMarketData()
time.sleep(0.1)
prev_contracts = []
continue
# modified?
if not first_run and db_last_modified == db_modified:
continue
# continue...
db_modified = db_last_modified
# read contructs db
df = pd.read_csv(self.args['symbols'], header=0)
if df.empty:
continue
# removed expired
df = df[(
(df['expiry'] < 1000000) & (
df['expiry'] >= int(datetime.now().strftime('%Y%m')))) | (
(df['expiry'] >= 1000000) & (
df['expiry'] >= int(datetime.now().strftime('%Y%m%d')))) |
np_isnan(df['expiry'])
]
# fix expiry formatting (no floats)
df['expiry'] = df['expiry'].fillna(
0).astype(int).astype(str)
df.loc[df['expiry'] == "0", 'expiry'] = ""
df = df[df['sec_type'] != 'BAG']
df.fillna("", inplace=True)
df.to_csv(self.args['symbols'], header=True, index=False)
tools.chmod(self.args['symbols'])
# ignore commentee
df = df[~df['symbol'].str.contains("#")]
contracts = [tuple(x) for x in df.values]
if first_run:
first_run = False
else:
if contracts != prev_contracts:
# cancel market data for removed contracts
for contract in prev_contracts:
if contract not in contracts:
self.ibConn.cancelMarketData(
self.ibConn.createContract(contract))
if self.args['orderbook']:
self.ibConn.cancelMarketDepth(
self.ibConn.createContract(contract))
time.sleep(0.1)
contract_string = self.ibConn.contractString(
contract).split('_')[0]
self.log_blotter.info(
'Contract Removed [%s]', contract_string)
# request market data
for contract in contracts:
if contract not in prev_contracts:
self.ibConn.requestMarketData(
self.ibConn.createContract(contract))
if self.args['orderbook']:
self.ibConn.requestMarketDepth(
self.ibConn.createContract(contract))
time.sleep(0.1)
contract_string = self.ibConn.contractString(
contract).split('_')[0]
self.log_blotter.info(
'Contract Added [%s]', contract_string)
# update latest contracts
prev_contracts = contracts
time.sleep(2)
except (KeyboardInterrupt, SystemExit):
self.quitting = True # don't display connection errors on ctrl+c
print(
"\n\n>>> Interrupted with Ctrl-c...\n(waiting for running tasks to be completed)\n")
# asynctools.multitasking.killall() # stop now
asynctools.multitasking.wait_for_tasks() # wait for threads to complete
sys.exit(1)
# -------------------------------------------
# CLIENT / STATIC
# -------------------------------------------
def _fix_history_sequence(self, df, table):
""" fix out-of-sequence ticks/bars """
# remove "Unnamed: x" columns
cols = df.columns[df.columns.str.startswith('Unnamed:')].tolist()
df.drop(cols, axis=1, inplace=True)
# remove future dates
df['datetime'] = | pd.to_datetime(df['datetime'], utc=True) | pandas.to_datetime |
import subprocess, gzip, datetime, pickle, glob, os, openpyxl, shutil, math
import pandas as pd
from plotly.subplots import make_subplots
from pathlib import Path
from joblib import Parallel, delayed
import plotly.graph_objects as go
import plotly.express as px
from statistics import mean
from statistics import median
from statistics import stdev
import numpy as np
import PySimpleGUI as sg
############################################################################
## general function
def calculate_read_stats(lst):
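    """ Return [minimum, maximum, mean, median, stdev] for a list of per-sample read counts (mean, median and stdev rounded to two decimals) """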
minimum = min(lst)
maximum = max(lst)
average = round(mean(lst),2)
med = round(median(lst),2)
deviation = round(stdev(lst),2)
return([minimum, maximum, average, med, deviation])
## Plot a heatmap of the ESV or OTU table
def plot_heatmap(file, unit, project):
""" Function to plot a heatmap of the OTU or ESV table """
## load OTU table and extract read numbers
df = pd.read_excel(file)
ID_list = df['ID'].values.tolist()
df = df.drop(columns=['ID', 'Seq'])
Sample_list = df.columns.tolist()
z = []
for i in df.values.tolist():
sublist=[]
for num in i:
if num == 0:
sublist.append(0)
else:
sublist.append(math.log(num))
z.append(sublist)
## create heatmap
fig = px.imshow(z, y=ID_list, x=Sample_list, aspect="auto")
    ## pick a figure height from the number of rows, capped at 3000 px, and adjust the layout
    h = min(len(ID_list) * 15, 3000)
    fig.update_layout(template='simple_white', width=1500, height=h, title='log(' + unit + ')', coloraxis_showscale=False)
    if h >= 3000:
        ## too many rows to label individually
        fig.update_yaxes(showticklabels=False, title=unit + 's')
    else:
        fig.update_yaxes(tickmode='linear')
        fig.update_xaxes(tickmode='linear')
## write image
out_pdf = Path(project).joinpath('0_statistics', 'Summary_statistics', unit + '_heatmap.pdf')
fig.write_image(str(out_pdf))
out_html = Path(project).joinpath('0_statistics', 'Summary_statistics', unit + '_heatmap.html')
fig.write_html(str(out_html))
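## Hedged usage sketch (paths are placeholders): the input table needs an 'ID' and a 'Seq' column
## plus one read-count column per sample, e.g.
## plot_heatmap('<project>/ESV_table.xlsx', 'ESV', '<project>')
## which writes <project>/0_statistics/Summary_statistics/ESV_heatmap.pdf and .html.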
def plot_reads(Project_report, project, ESV_table_lulu, OTU_table_lulu):
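    """ Summarize per-sample read counts across the processing steps (PE merging, primer trimming, quality filtering, OTU clustering) and the final ESV/OTU tables """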
df_3 = pd.read_excel(Project_report, sheet_name='3_PE merging')
df_4 = pd.read_excel(Project_report, sheet_name='4_primer_trimming')
df_5 = pd.read_excel(Project_report, sheet_name='5_quality_filtering')
df_7 = pd.read_excel(Project_report, sheet_name='7_otu_clustering')
df_9_OTUs = pd.read_excel(OTU_table_lulu)
df_9_ESVs = pd.read_excel(ESV_table_lulu)
samples = df_7['File'].values.tolist()
raw_reads = df_3['processed reads'].values.tolist()
merged_reads = df_3['merged reads'].values.tolist()
trimmed_reads = df_4['trimmed reads'].values.tolist()
filtered_reads = df_5['passed reads'].values.tolist()
mapped_reads_OTUs = [sum(df_9_OTUs[sample].values.tolist()) for sample in samples]
mapped_reads_ESVs = [sum(df_9_ESVs[sample].values.tolist()) for sample in samples]
stats_raw_reads = calculate_read_stats(raw_reads)
stats_merged_reads = calculate_read_stats(merged_reads)
stats_trimmed_reads = calculate_read_stats(trimmed_reads)
stats_filtered_reads = calculate_read_stats(filtered_reads)
stats_mapped_reads_OTUs = calculate_read_stats(mapped_reads_OTUs)
stats_mapped_reads_ESVs = calculate_read_stats(mapped_reads_ESVs)
## dataframe
df_stats = | pd.DataFrame() | pandas.DataFrame |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = | Index(["b", "a", "a"]) | pandas.Index |
import pytest
import jax.numpy as np
import pandas as pd
from pzflow import Flow
from pzflow.bijectors import Chain, Reverse, Scale
from pzflow.distributions import *
@pytest.mark.parametrize(
"data_columns,bijector,info,file",
[
(None, None, None, None),
(("x", "y"), None, None, None),
(None, Reverse(), None, None),
(("x", "y"), None, None, "file"),
(None, Reverse(), None, "file"),
(None, None, "fake", "file"),
],
)
def test_bad_inputs(data_columns, bijector, info, file):
with pytest.raises(ValueError):
Flow(data_columns, bijector=bijector, info=info, file=file)
@pytest.mark.parametrize(
"flow",
[
Flow(("redshift", "y"), Reverse(), latent=Normal(2)),
Flow(("redshift", "y"), Reverse(), latent=Tdist(2)),
Flow(("redshift", "y"), Reverse(), latent=Uniform((-3, 3), (-3, 3))),
],
)
def test_returns_correct_shape(flow):
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
x = pd.DataFrame(xarray, columns=("redshift", "y"))
conditions = flow._get_conditions(x, xarray.shape[0])
x_with_errs = flow._array_with_errs(x)
assert x_with_errs.shape == (3, 4)
x_with_errs = flow._array_with_errs(x, skip="redshift")
assert x_with_errs.shape == (3, 3)
xfwd, xfwd_log_det = flow._forward(flow._params, xarray, conditions=conditions)
assert xfwd.shape == x.shape
assert xfwd_log_det.shape == (x.shape[0],)
xinv, xinv_log_det = flow._inverse(flow._params, xarray, conditions=conditions)
assert xinv.shape == x.shape
assert xinv_log_det.shape == (x.shape[0],)
J = flow._jacobian(flow._params, xarray, conditions=conditions)
assert J.shape == (3, 2, 2)
nsamples = 4
assert flow.sample(nsamples).shape == (nsamples, x.shape[1])
assert flow.log_prob(x).shape == (x.shape[0],)
grid = np.arange(0, 2.1, 0.12)
pdfs = flow.posterior(x, column="y", grid=grid)
assert pdfs.shape == (x.shape[0], grid.size)
pdfs = flow.posterior(x.iloc[:, 1:], column="redshift", grid=grid)
assert pdfs.shape == (x.shape[0], grid.size)
pdfs = flow.posterior(x.iloc[:, 1:], column="redshift", grid=grid, batch_size=2)
assert pdfs.shape == (x.shape[0], grid.size)
assert len(flow.train(x, epochs=11, verbose=True)) == 12
def test_error_convolution():
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
x = pd.DataFrame(xarray, columns=("redshift", "y"))
flow = Flow(("redshift", "y"), Reverse(), latent=Normal(2))
assert flow.log_prob(x, convolve_err=True).shape == (x.shape[0],)
assert np.allclose(
flow.log_prob(x, convolve_err=True),
flow.log_prob(x, convolve_err=False),
)
grid = np.arange(0, 2.1, 0.12)
pdfs = flow.posterior(x, column="y", grid=grid, convolve_err=True)
assert pdfs.shape == (x.shape[0], grid.size)
assert (
len(flow.train(x, epochs=11, convolve_err=True, burn_in_epochs=4, verbose=True))
== 17
)
flow = Flow(("redshift", "y"), Reverse(), latent=Tdist(2))
with pytest.raises(ValueError):
flow.log_prob(x, convolve_err=True).shape
with pytest.raises(ValueError):
flow.posterior(x, column="y", grid=grid, convolve_err=True)
with pytest.raises(ValueError):
flow.train(x, epochs=11, convolve_err=True, burn_in_epochs=4, verbose=True)
def test_columns_with_errs():
columns = ("redshift", "y")
flow = Flow(columns, Reverse())
xarray = np.array([[1, 2, 0.4, 0.2], [3, 4, 0.1, 0.9]])
x = | pd.DataFrame(xarray, columns=("redshift", "y", "y_err", "redshift_err")) | pandas.DataFrame |
import requests
import pandas as pd
from io import StringIO, BytesIO
from lxml import etree as et
API_KEY = '<GREATSCHOOLS.ORG API KEY GOES HERE>'
def generate_file(name, response):
d = {}
df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import random
from rpy2.robjects.packages import importr
utils = importr('utils')
prodlim = importr('prodlim')
survival = importr('survival')
#KMsurv = importr('KMsurv')
#cvAUC = importr('pROC')
#utils.install_packages('pseudo')
#utils.install_packages('prodlim')
#utils.install_packages('survival')
#utils.install_packages('KMsurv')
#utils.install_packages('pROC')
import rpy2.robjects as robjects
from rpy2.robjects import r
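# The four sim_event_times_case* functions below all repeat the same rpy2 round-trip that turns
# Kaplan-Meier jackknife pseudo-observations into per-cutoff risk values. The helper below is an
# illustrative sketch of that shared step only; the name km_pseudo_risks is ours and the case
# functions below do not call it.
def km_pseudo_risks(times, events, cutoffs):
    # push the training times/events and the evaluation cutoffs into the R session
    robjects.globalenv["time_r"] = robjects.FloatVector(times)
    robjects.globalenv["event_r"] = robjects.BoolVector(events)
    robjects.globalenv["cutoff"] = robjects.FloatVector(cutoffs)
    # Kaplan-Meier fit and jackknife pseudo-values for the survival function at each cutoff
    r('km_out <- prodlim(Hist(time_r, event_r) ~ 1)')
    r('surv_pso <- jackknife(km_out, times = cutoff)')
    # return one risk column (1 - survival pseudo-value) per cutoff
    return np.column_stack([np.array(r('1 - surv_pso[,%d]' % (j + 1)), dtype=np.float64)
                            for j in range(len(cutoffs))])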
def sim_event_times_case1(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
#split data
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
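# Hedged usage sketch (assumes a torchvision-style dataset whose integer .targets index the images):
# clindata = sim_event_times_case1(trainset, num_samples=1000)
# clindata['train_val']   # long-format training frame with pseudo-value risks per cutoff
# clindata['test']        # long-format test frame with per-cutoff event indicators
# clindata['cutoff']      # the five evaluation times; clindata['cens'] is the censoring fraction
# Case 2 below keeps the event-time model of case 1 but replaces the uniform censoring of a ~30%
# subset with covariate-dependent censoring times.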
def sim_event_times_case2(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
censored_times = np.sqrt(-np.log(np.random.uniform(low=0,high=1,size=num_samples))/denom )
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
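# Case 3 changes the event-time model (digit/covariate interactions in the hazard) and censors with
# Gamma-distributed times whose shape depends on the covariates and whose scale is the digit label.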
def sim_event_times_case3(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1* digits- 1.6*np.cos(digits)*clinical_data[:,0]+.3*clinical_data[:,1]*clinical_data[:,0] )* (.7/2)
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
#denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
shape_c = np.maximum(0.001,np.exp(-1.8*clinical_data[:,0]+1.4*clinical_data[:,1]+1.5 *clinical_data[:,0]*clinical_data[:,1]))
censored_times = np.random.gamma(shape_c,digits, num_samples)
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
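# Overview of the sim_event_times_case* functions below: each draws gamma-distributed
# true event times whose shape depends on the sampled image label ("digits") and on
# simulated clinical covariates, applies a case-specific censoring mechanism,
# discretises follow-up at five quantiles of the true times, and uses R
# (prodlim + jackknife via rpy2) to compute pseudo-observation risks at those
# cutoffs for the training split. The returned dictionary bundles the long-format
# train/test frames, covariates, cutoffs and censoring percentages.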
def sim_event_times_case4(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
shape = np.maximum(0.001,np.exp(.5*digits+.2*clinical_data[:,0] * np.cos(digits)+1.5*clinical_data[:,1]+1.2*clinical_data[:,0]))
true_times = np.random.gamma(shape,digits, num_samples) # shape = shape; scale = digits
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case5(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
shape = np.maximum(0.001,np.exp(.5*digits+.2*clinical_data[:,0] * np.cos(digits)+1.5*clinical_data[:,1]+1.2*clinical_data[:,0]))
true_times = np.random.gamma(shape,digits, num_samples) # shape = shape; scale = digits
denom = np.exp( -3.4*clinical_data[:,0]+.6*clinical_data[:,1] -2.2*clinical_data[:,2] ) * .005
censored_times = np.sqrt(-np.log(np.random.uniform(low=0,high=1,size=num_samples))/denom )
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = | pd.get_dummies(long_test_clindata, columns=['time_point']) | pandas.get_dummies |
#!/usr/bin/python
import time
import numpy as np
import pandas as pd
import argparse
from math import exp
from math import sqrt
from datetime import datetime
from os import listdir
import sys
# BOKEH
from bokeh import events
from bokeh.io import output_file, show
from bokeh.models import CustomJS, HoverTool, ColumnDataSource, Slider, CheckboxGroup, RadioGroup, Button, MultiSelect
from bokeh.plotting import figure
from bokeh.transform import linear_cmap
from bokeh.transform import log_cmap
from bokeh.util.hex import axial_to_cartesian
from bokeh.util.hex import cartesian_to_axial
from bokeh.layouts import column, row
from bokeh.palettes import Viridis256, Greys256
def import_table(file):
'''
This function imports the pkl files from the tables folder and returns a pandas dataframe.
'''
table = pd.read_pickle(file)
return table
def create_array(table):
'''
This function receives a dataframe with logP and Mass values for every ChEBI identifier.
It returns two numpy arrays: one for logP and one for mass.
'''
# create lists
x = [float(logP) for logP in table.logP]
y = [float(mass) for mass in table.Mass]
return np.asarray(x), np.asarray(y)
def hexbin(df, x, y, size, aspect_scale, orientation):
'''
This function receives x and y coordinate arrays and converts these into q and r hexagon coordinates by calling Bokeh's "cartesian_to_axial" function.
The q and r coordinates are added to the dataframe, and this dataframe is returned.
'''
q, r = cartesian_to_axial(x, y, size, orientation=orientation, aspect_scale=aspect_scale)
df.loc[:, 'q'] = q
df.loc[:, 'r'] = r
return df
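# Minimal usage sketch for hexbin() (illustrative only, not part of the original
# pipeline): two example (logP, mass) points mapped onto axial hexagon coordinates.
# The size, aspect_scale and "pointytop" orientation values are arbitrary assumptions.
def _hexbin_demo():
    x = np.array([1.2, -0.4])
    y = np.array([180.1, 46.0])
    demo_df = pd.DataFrame({"logP": x, "Mass": y})
    # adds 'q' and 'r' axial coordinate columns to demo_df and returns it
    return hexbin(demo_df, x, y, size=0.5, aspect_scale=100, orientation="pointytop")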
def add_tooltip_columns(df, table):
'''
For every hexagon, a tooltip will be created that will be shown when the user hovers with the mouse over the hexagon.
The tooltip will show the 3 most frequent ChEBI identifiers and additional information.
This function receives:
- a "df" dataframe (with hexagonal coordinates) that will be the source for the multiplot
- the "table" dataframe with information that is needed for the tooltip such as name, mass, logP for every ChEBI identifier
In this function, the tooltip information will be added to the original dataframe in additional columns.
These columns will be used by JavaScript code to display in the tooltip.
'''
table = table.drop(['Class', 'logP', 'Mass'], axis=1)
table = table.reset_index()
# Define tooltip size
TOOLTIP_COUNT = 3
columns = table.columns
tooltip_columns = {column+str(i):[] for i in range(1, TOOLTIP_COUNT+1) for column in columns}
# Extract ChEBI identifiers from dataframe
chebi_ids = [ids if isinstance(ids, list) else [] for ids in df.ChEBI]
list_for_df = []
# Use chebi identifiers to look up information in the "table" dataframe
for ids in chebi_ids:
rows = table[table.ChEBI.isin(ids)]
# Sort and select most frequent ChEBI identifiers
rows = rows.sort_values(by='Count', ascending=False)
values_nested = rows[0:TOOLTIP_COUNT].values.tolist()
# Unnest information from the table,
values_unnested = [item for sublist in values_nested for item in sublist]
while len(values_unnested) < (TOOLTIP_COUNT*len(columns)):
values_unnested.append("-")
list_for_df.append(values_unnested)
df_tooltip = pd.DataFrame(list_for_df, columns = tooltip_columns)
df = df.join(df_tooltip, how='left')
return df
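# Small illustration of the tooltip column naming scheme generated above: with
# TOOLTIP_COUNT = 3 and table columns such as ('ChEBI', 'Count') (hypothetical here),
# the keys come out as 'ChEBI1', 'Count1', 'ChEBI2', 'Count2', 'ChEBI3', 'Count3'.
def _tooltip_columns_demo():
    columns = ["ChEBI", "Count"]
    return [column + str(i) for i in range(1, 4) for column in columns]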
def get_blur(x,y,sigma_x,sigma_y):
'''
This function receives x, y values and sigma x, sigma y values and returns the calculated blur value.
See https://en.wikipedia.org/wiki/Multivariate_normal_distribution
'''
return exp(-0.5*(x*x/sigma_x/sigma_x + y*y/sigma_y/sigma_y))
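# Worked example for get_blur() (values chosen purely for illustration): for a
# neighbouring hexagon at distance (1.5, sqrt(3)/2) with sigma_x = 1.5 and
# sigma_y = sigma_x / 2 = 0.75, the factor is exp(-0.5 * (1.0 + 1.33)) ~= 0.31,
# i.e. roughly a third of the counts would be attributed to that neighbour.
def _get_blur_demo():
    return get_blur(1.5, sqrt(3) / 2, 1.5, 0.75)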
def get_rows(q, r, counts, tfidf, kernel, blur_max, step_size):
'''
For every hexagon (=row in the dataframe), counts will be distributed to surrounding hexagons in a gaussian manner (blur).
This function receives information for one hexagon so that it can distribute its counts to other hexagons.
To distribute the counts to other hexagons/rows, we use the "kernel".
We create new rows for other hexagons, which means we might create many rows with the same hexagonal coordinates.
Newly created rows will be returned.
'''
# initiate original row
rows = [[q, r, counts, tfidf] + [counts for i in np.arange(0, blur_max+step_size, step_size)] + [tfidf for i in np.arange(0, blur_max+step_size, step_size)]]
# use kernel to distribute counts to other rows
for coords_new, blur_factors in kernel.items():
q_new = q + coords_new[0]
r_new = r + coords_new[1]
# create new row, using the kernel's coordinates and calculated blur values
new_row = [q_new, r_new, 0, 0] + list(map(lambda n: n * counts, blur_factors)) + list(map(lambda n: n*tfidf, blur_factors))
rows.append(new_row)
return rows
def construct_kernel(blur_max, step_size):
'''
This function receives the maximum blur and step size to construct the kernel.
The kernel is a dictionary that uses the coordinates as keys, and the blur values as values.
The blur values depend on sd_x, so the kernel stores a list of blur values for sd_x ranging from 0 to blur_max.
Blur values are calculated by "get_blur()".
The kernel will be used by "get_rows()" to distribute counts to surrounding hexagons.
'''
coordinates_to_distance = {(-5,2):(7.5, sqrt(3)/2),(-5,3):(7.5, sqrt(3)/2),
(-4,1):(6, sqrt(3)),(-4,2):(6, 0),(-4,3):(6,sqrt(3)),
(-3,0):(4.5, 3*sqrt(3)/2),(-3,1):(4.5, sqrt(3)/2),(-3,2):(4.5, sqrt(3)/2),(-3,3):(4.5, 3*sqrt(3)/2),
(-2,-1):(3, 2*sqrt(3)),(-2,0):(3, sqrt(3)),(-2,1):(3, 0),(-2,2):(3, sqrt(3)),(-2,3):(3, 2*sqrt(3)),
(-1,-1):(1.5, 3*sqrt(3)/2),(-1,0):(1.5, sqrt(3)/2),(-1,1):(1.5, sqrt(3)/2),(-1,2):(1.5, 3*sqrt(3)/2),
(0,-2):(0, 2*sqrt(3)),(0,-1):(0, sqrt(3)),(0,1):(0, sqrt(3)),(0,2):(0, 2*sqrt(3)),
(1,-2):(1.5, 3*sqrt(3)/2),(1,-1):(1.5, sqrt(3)/2),(1,0):(1.5, sqrt(3)/2),(1,1):(1.5, 3*sqrt(3)/2),(2,-3):(3, 2*sqrt(3)),
(2,-2):(3, sqrt(3)),(2,-1):(3, 0),(2,0):(3, sqrt(3)),(2,1):(3, 2*sqrt(3)),
(3,-3):(4.5, 3*sqrt(3)/2),(3,-2):(4.5, sqrt(3)/2),(3,-1):(4.5, sqrt(3)/2),(3,0):(4.5, 3*sqrt(3)/2),
(4,-3):(6, sqrt(3)),(4,-2):(6, 0),(4,-1):(6, sqrt(3)),
(5,-3):(7.5, sqrt(3)/2),(5,-2):(7.5, sqrt(3)/2)}
kernel = {}
for key, distance in coordinates_to_distance.items():
kernel[key] = [0]
for sd_x in np.arange(step_size, blur_max+step_size, step_size):
kernel[key].append(get_blur(distance[0], distance[1], sd_x, sd_x/2))
return kernel
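# Hedged usage sketch for construct_kernel(): with blur_max = 1.0 and
# step_size = 0.25 (example values), every neighbour offset maps to a list of five
# blur factors, one per sd_x in (0, 0.25, 0.5, 0.75, 1.0), the first being 0 (no blur).
def _construct_kernel_demo():
    kernel = construct_kernel(blur_max=1.0, step_size=0.25)
    # e.g. the factors for the neighbour at axial offset (-1, 0)
    return kernel[(-1, 0)]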
def add_gaussian_blur(df, blur_max, step_size):
'''
Function:
This function adds gaussian blur to the plot by going through all hexagons and applying the kernel to the neighbouring hexagons.
To speed up the process, the pandas dataframe is put in a python dictionary, so as to quickly find the neighbouring hexagon coordinates using the coordinates as keys.
After blurring, the dictionary is put in a pandas dataframe again, and this dataframe is returned.
Columns:
sd_x values for calculating blur values are used to name a column with counts that result from blurring with that specific sd_x.
This makes selecting the correct sd_x column easy with the slider code, because the slider returns values that represent sd_x values.
ColumnDataSource does not accept integers as column names, so sd_x column names are changed to strings.
sd_x columns contain lists; in each list the first value is the normal count and the second value is the tfidf count.
'''
kernel = construct_kernel(blur_max, step_size)
columns = ['q', 'r', 'Count', 'TFIDF'] + [str(sd_x) for sd_x in np.arange(0, (blur_max+step_size), step_size)] + ['%s_tfidf' % str(sd_x) for sd_x in np.arange(0, (blur_max+step_size), step_size)]
df_blur = pd.concat([pd.DataFrame(get_rows(q, r, counts, tfidf, kernel, blur_max, step_size),
columns=columns) for q, r, counts, tfidf in zip(df.q, df.r, df.Count, df.TFIDF)], ignore_index=True)
df_blur = df_blur.groupby(['q', 'r'], as_index=False).agg(sum)
df_joined = df_blur.merge(df.loc[:,['q', 'r', 'ChEBI']], on=['q', 'r'], how='outer')
return df_joined
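# Hedged end-to-end sketch for add_gaussian_blur(): a tiny frame with axial
# coordinates, counts, TFIDF values and ChEBI lists being blurred. All values below
# are invented for illustration; blur_max/step_size are arbitrary example settings.
def _add_gaussian_blur_demo():
    demo = pd.DataFrame({
        "q": [0, 1],
        "r": [0, 0],
        "Count": [10, 3],
        "TFIDF": [0.8, 0.2],
        "ChEBI": [["CHEBI:15377"], ["CHEBI:16236"]],
    })
    # returns one row per (q, r) cell reached by the kernel, with blurred count columns
    return add_gaussian_blur(demo, blur_max=1.0, step_size=0.5)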
def check_for_ids(id_list, class_id):
'''
This function checks whether the Class identifier is in the "id_list".
This ChEBI ID-specific list contains other ChEBI identifiers of chemicals that are of a higher-level hierarchical class.
'''
# check = any(class_id == id for id in id_list)
check = any(str(class_id) == str(id) for id in id_list)
return check
def create_class_source(table, size, ratio, orientation, class_id):
'''
This function finds all chemicals belonging to class "class_id" as defined by the ChEBI ontology.
It returns a dataframe filtered for these chemicals.
'''
if class_id is None:
# if no class id is given, then class source should be empty
df = | pd.DataFrame() | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
from collections import OrderedDict
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.core.construction import create_series_with_explicit_dtype
class TestFromDict:
# Note: these tests are specific to the from_dict method, not for
# passing dictionaries to DataFrame.__init__
def test_from_dict_scalars_requires_index(self):
msg = "If using all scalar values, you must pass an index"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
def test_constructor_list_of_odicts(self):
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
result = DataFrame(data)
expected = DataFrame.from_dict(
dict(zip(range(len(data)), data)), orient="index"
)
tm.assert_frame_equal(result, expected.reindex(result.index))
def test_constructor_single_row(self):
data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(
result.index
)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(["x", "y"], data))
idx = Index(["a", "b", "c"])
# all named
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx, name="y"),
]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx),
]
result = DataFrame(data2)
sdict = OrderedDict(zip(["x", "Unnamed 0"], data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# none named
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
data = [
create_series_with_explicit_dtype(d, dtype_if_empty=object) for d in data
]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series(dtype=object)])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(["a", "b", "c"])
data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self, float_string_frame):
data_dict = float_string_frame.T._series
recons = DataFrame.from_dict(data_dict, orient="index")
expected = float_string_frame.reindex(index=recons.index)
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {"hi": [32, 3, 3], "there": [3, 5, 3]}
rs = DataFrame.from_dict(a, orient="index")
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_constructor_from_ordered_dict(self):
# GH#8425
a = OrderedDict(
[
("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),
("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),
("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),
]
)
expected = | DataFrame.from_dict(a, orient="columns") | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""project3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1PW90I5c1X5VipzIvowFpbLOAtjLw7-co
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
"""1.**transforming data csv to pandas dataframe** """
data= | pd.read_csv("/content/drive/MyDrive/simplilearn/python with data science /project3/Comcast_telecom_complaints_data.csv") | pandas.read_csv |
import pandas as pd
from autor import Author
from excel import ExcelFile
from individuos import Student, Egress
from verifica_autores import em_lista_autores, trata_exceçoes
from valores import ND, quadrennium
from PyscopusModified import ScopusModified
from pprint import pprint
from excecoes import excecoes_artigos_scopus
def calcula_AE(data_frame, lista_egressos, lista_alunos):
egressos_nomes = []
for egresso in lista_egressos:
egressos_nomes.append(trata_exceçoes(egresso.name.strip()))
alunos_nomes = []
for aluno in lista_alunos:
alunos_nomes.append(trata_exceçoes(aluno.name.strip()))
AE_quantidade = 0
for index, row in data_frame.iterrows():
AE = False
for coluna in row.index:
if "Autor" in str(coluna):
if data_frame[coluna][index] != "":
for pos_egresso, egresso in enumerate(egressos_nomes):
if data_frame[coluna][index] == egresso:
if lista_egressos[pos_egresso].period[str(int(data_frame["Ano"][index]))[2:]] == True:
AE = True
for pos_aluno, aluno in enumerate(alunos_nomes):
if data_frame[coluna][index] == aluno:
if lista_alunos[pos_aluno].period[str(data_frame["Ano"][index])[2:]] == True:
AE = True
if AE == True:
AE_quantidade += 1
return AE_quantidade
def calcula_quantidade(data_frame, aux_porc, lista_egressos, lista_alunos):
qtd_AE = calcula_AE(data_frame, lista_egressos, lista_alunos)
qtd = len(data_frame.index)
porc = f"{aux_porc * qtd:.2f}%"
try:
porc_AE = f"{100/qtd * qtd_AE:.2f}%"
except ZeroDivisionError:
porc_AE = "0%"
return (qtd, qtd_AE, porc, porc_AE)
def get_indicadores(info, lista_egressos, lista_alunos, geral = False):
data_frame = pd.DataFrame(info)
porcentagens = []
total_artigos = len(data_frame["Tipo"])
if total_artigos != 0:
aux_porc = 100/total_artigos
else:
aux_porc = 0
porcentagens_AE = []
periodicos = data_frame.loc[data_frame["Tipo"] == "Periódico"]
periodicos, AE_periodicos, porc_periodicos, porc_AE_periodicos = calcula_quantidade(periodicos, aux_porc, lista_egressos, lista_alunos)
anais = data_frame.loc[data_frame["Tipo"] == "Anais"]
anais, AE_anais, porc_anais, porc_AE_anais = calcula_quantidade(anais, aux_porc, lista_egressos, lista_alunos)
a1 = data_frame.loc[data_frame["Qualis 2019"] == "A1"]
a1, AE_a1, porc_a1, porc_AE_a1 = calcula_quantidade(a1, aux_porc, lista_egressos, lista_alunos)
a2 = data_frame.loc[data_frame["Qualis 2019"] == "A2"]
a2, AE_a2, porc_a2, porc_AE_a2 = calcula_quantidade(a2, aux_porc, lista_egressos, lista_alunos)
a3 = data_frame.loc[data_frame["Qualis 2019"] == "A3"]
a3, AE_a3, porc_a3, porc_AE_a3 = calcula_quantidade(a3, aux_porc, lista_egressos, lista_alunos)
a4 = data_frame.loc[data_frame["Qualis 2019"] == "A4"]
a4, AE_a4, porc_a4, porc_AE_a4 = calcula_quantidade(a4, aux_porc, lista_egressos, lista_alunos)
a1_a4 = a1 + a2 + a3 + a4
AE_a1_a4 = AE_a1 + AE_a2 + AE_a3 + AE_a4
porc_a1_a4 = f"{aux_porc * a1_a4:.2f}%"
try:
porc_AE_a1_a4 = f"{100/a1_a4 * AE_a1_a4:.2f}%"
except ZeroDivisionError:
porc_AE_a1_a4 = "0%"
b1 = data_frame.loc[data_frame["Qualis 2019"] == "B1"]
b1, AE_b1, porc_b1, porc_AE_b1 = calcula_quantidade(b1, aux_porc, lista_egressos, lista_alunos)
b2 = data_frame.loc[data_frame["Qualis 2019"] == "B2"]
b2, AE_b2, porc_b2, porc_AE_b2 = calcula_quantidade(b2, aux_porc, lista_egressos, lista_alunos)
b3 = data_frame.loc[data_frame["Qualis 2019"] == "B3"]
b3, AE_b3, porc_b3, porc_AE_b3 = calcula_quantidade(b3, aux_porc, lista_egressos, lista_alunos)
b4 = data_frame.loc[data_frame["Qualis 2019"] == "B4"]
b4, AE_b4, porc_b4, porc_AE_b4 = calcula_quantidade(b4, aux_porc, lista_egressos, lista_alunos)
b1_b4 = b1 + b2 + b3 + b4
AE_b1_b4 = AE_b1 + AE_b2 + AE_b3 + AE_b4
porc_b1_b4 = f"{aux_porc * b1_b4:.2f}%"
try:
porc_AE_b1_b4 = f"{100/b1_b4 * AE_b1_b4:.2f}%"
except ZeroDivisionError:
porc_AE_b1_b4 = "0%"
outros = data_frame.loc[((data_frame["Qualis 2019"] != "A1") & (data_frame["Qualis 2019"] != "A2") & (data_frame["Qualis 2019"] != "A3") & (data_frame["Qualis 2019"] != "A4"))]
outros = outros.loc[((outros["Qualis 2019"] != "B1") & (outros["Qualis 2019"] != "B2") & (outros["Qualis 2019"] != "B3") & (outros["Qualis 2019"] != "B4"))]
outros, AE_outros, porc_outros, porc_AE_outros = calcula_quantidade(outros, aux_porc, lista_egressos, lista_alunos)
porcentagens.append(porc_periodicos)
porcentagens.append(porc_anais)
porcentagens.append(porc_a1_a4)
porcentagens.append(porc_a1)
porcentagens.append(porc_a2)
porcentagens.append(porc_a3)
porcentagens.append(porc_a4)
porcentagens.append(porc_b1_b4)
porcentagens.append(porc_b1)
porcentagens.append(porc_b2)
porcentagens.append(porc_b3)
porcentagens.append(porc_b4)
porcentagens.append(porc_outros)
porcentagens_AE.append(porc_AE_periodicos)
porcentagens_AE.append(porc_AE_anais)
porcentagens_AE.append(porc_AE_a1_a4)
porcentagens_AE.append(porc_AE_a1)
porcentagens_AE.append(porc_AE_a2)
porcentagens_AE.append(porc_AE_a3)
porcentagens_AE.append(porc_AE_a4)
porcentagens_AE.append(porc_AE_b1_b4)
porcentagens_AE.append(porc_AE_b1)
porcentagens_AE.append(porc_AE_b2)
porcentagens_AE.append(porc_AE_b3)
porcentagens_AE.append(porc_AE_b4)
porcentagens_AE.append(porc_AE_outros)
tipo_qualis = ["Periódicos", "Anais", "A1-A4", "A1", "A2", "A3", "A4", "B1-B4", "B1", "B2", "B3", "B4", "Outros"]
tabela = {"Tipo/Qualis": tipo_qualis, "Quantidade": [], "Porcentagem": [], "Quantidade com alunos/egressos":[], "Porcentagem alunos/egressos":[]}
tabela["Tipo/Qualis"].append(None)
tabela["Tipo/Qualis"].append("Índice")
tabela["Tipo/Qualis"].append("Irestrito")
tabela["Tipo/Qualis"].append("Igeral")
tabela["Quantidade"].append(periodicos)
tabela["Quantidade"].append(anais)
tabela["Quantidade"].append(a1_a4)
tabela["Quantidade"].append(a1)
tabela["Quantidade"].append(a2)
tabela["Quantidade"].append(a3)
tabela["Quantidade"].append(a4)
tabela["Quantidade"].append(b1_b4)
tabela["Quantidade"].append(b1)
tabela["Quantidade"].append(b2)
tabela["Quantidade"].append(b3)
tabela["Quantidade"].append(b4)
tabela["Quantidade"].append(outros)
tabela["Quantidade"].append(None)
Irestrito = a1 + (a2 * 0.875) + (a3 * 0.75) + (a4 * 0.625)
if Irestrito != 0:
Irestrito = round(Irestrito, 2)
Irestrito_medio = round((Irestrito/ND), 2)
else:
Irestrito_medio = 0
Igeral = Irestrito + (b1 * 0.5) + (b2 * 0.2) + (b3 * 0.1) + (b4 * 0.05)
if Igeral != 0:
Igeral = round(Igeral, 2)
Igeral_medio = round((Igeral/ND), 2)
else:
Igeral_medio = 0
tabela["Quantidade"].append("Acumulado")
tabela["Quantidade"].append(Irestrito)
tabela["Quantidade"].append(Igeral)
tabela["Quantidade com alunos/egressos"].append(AE_periodicos)
tabela["Quantidade com alunos/egressos"].append(AE_anais)
tabela["Quantidade com alunos/egressos"].append(AE_a1_a4)
tabela["Quantidade com alunos/egressos"].append(AE_a1)
tabela["Quantidade com alunos/egressos"].append(AE_a2)
tabela["Quantidade com alunos/egressos"].append(AE_a3)
tabela["Quantidade com alunos/egressos"].append(AE_a4)
tabela["Quantidade com alunos/egressos"].append(AE_b1_b4)
tabela["Quantidade com alunos/egressos"].append(AE_b1)
tabela["Quantidade com alunos/egressos"].append(AE_b2)
tabela["Quantidade com alunos/egressos"].append(AE_b3)
tabela["Quantidade com alunos/egressos"].append(AE_b4)
tabela["Quantidade com alunos/egressos"].append(AE_outros)
tabela["Quantidade com alunos/egressos"].append(None)
tabela["Quantidade com alunos/egressos"].append(None)
tabela["Quantidade com alunos/egressos"].append(None)
tabela["Quantidade com alunos/egressos"].append(None)
tabela["Porcentagem alunos/egressos"] = porcentagens_AE
tabela["Porcentagem alunos/egressos"].append(None)
tabela["Porcentagem alunos/egressos"].append(None)
tabela["Porcentagem alunos/egressos"].append(None)
tabela["Porcentagem alunos/egressos"].append(None)
tabela["Porcentagem"] = porcentagens
tabela["Porcentagem"].append(None)
if geral:
tabela["Porcentagem"].append("Média por docente")
tabela["Porcentagem"].append(Irestrito_medio)
tabela["Porcentagem"].append(Igeral_medio)
else:
tabela["Porcentagem"].append(None)
tabela["Porcentagem"].append(None)
tabela["Porcentagem"].append(None)
return pd.DataFrame(tabela)
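# Hedged illustration (not from the original project) of the minimal structure that
# get_indicadores() expects for "info": one entry per publication with its type,
# Qualis 2019 stratum, year and one "Autor N" column per author slot; the field
# values and author names below are invented examples.
_exemplo_info = {
    "Tipo": ["Periódico", "Anais"],
    "Qualis 2019": ["A1", "B2"],
    "Ano": [2019, 2020],
    "Autor 1": ["Fulano de Tal", "Beltrano de Tal"],
}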
def read_files():
# Read files - People
try:
professors = pd.read_csv("UNIVALI - PPGC - Professores.csv", sep=";", encoding='iso-8859-1')
except:
professors = pd.read_csv("UNIVALI - PPGC - Professores.csv", sep=";", encoding='utf-8')
try:
egress = pd.read_csv("planilha_egressos_lattes.CSV", sep=";", encoding='iso-8859-1')
except:
egress = pd.read_csv("planilha_egressos_lattes.CSV", sep=";", encoding='utf-8')
try:
students = pd.read_csv("Planilha - Levantamento alunos ativos.CSV", sep=";", encoding='iso-8859-1')
except:
students = pd.read_csv("Planilha - Levantamento alunos ativos.CSV", sep=";", encoding='utf-8')
# Read files - Qualis
try:
qualis_cc2016_file = | pd.read_csv("Qualis/QualisCC_2013_2016.csv", sep=";", encoding='iso-8859-1') | pandas.read_csv |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
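        # Worked example for the assertions right below: with 10% slippage the buy price
        # becomes 11.0, so spending the full $100 with 10% fees and a $1 fixed fee solves
        # size * 11 * 1.1 + 1 = 100, i.e. size = 99 / 12.1 = 8.1818... and fees = 10.0.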
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
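        # Half of the $100 cash is budgeted here; the $1 fixed fee leaves $49 for shares
        # at price 10, hence the 4.9 shares and $50 of remaining cash asserted below.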
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
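        # Infinite sizes mean "as much as possible": the cases below assert that +np.inf
        # spends all free cash, while -np.inf sells or shorts up to the limit the engine
        # allows for the current balance.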
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging
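        # With log=True the passed log_record appears to be filled in place with both the
        # inputs the call saw and the resulting state and order; the tuples asserted
        # below are those raw record values.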
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
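# The numba-compiled build_call_seq_nb must match the pure-Python build_call_seq for
# every CallSeqType, including the seeded random one.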
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
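# Shared fixtures: entries fire on the first three bars and exits on the last three, so
# with default settings a position opens at price 1 and is closed (or reversed) at
# price 4, which is what the single-column tests below assert.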
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
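# Thin wrappers that pin the direction argument, so each test can run the same signals
# through all three directions.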
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
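        # Percent sizing with direction='all' raises above, while the same call with
        # close_first=True succeeds below; reversing a position through a single
        # percent-sized order is presumably what is being disallowed here.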
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
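    # close_first=True splits a reversal into two orders: the exit bar only closes the
    # existing position and the opposite one is opened on the next active bar
    # (column 1 below: sell 100 at price 4, then short 80 at price 5).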
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
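    # conflict_mode decides what happens when an entry and an exit fire on the same bar:
    # 'ignore' drops both, 'entry' / 'exit' keep only that side, and 'opposite' keeps the
    # signal opposite to the current position (per-column behaviour asserted below).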
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
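    # With cash_sharing=True the columns of a group draw from one cash balance, so the
    # first group's init_cash drops from 200 (100 per column) to a shared 100.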
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
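        # call_seq='auto' rotates the per-bar call order so that sells are processed
        # before buys within a group, letting freed-up cash fund the next purchase
        # (e.g. on bar 1 column 2 is sold before column 1 is bought).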
kwargs = dict(
price=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_signals_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
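    # max_orders / max_logs cap how many order/log records can be written; six records
    # fit exactly here, while a cap of 5 overflows and raises.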
def test_max_orders(self):
_ = from_signals_all(price=price_wide)
_ = from_signals_all(price=price_wide, max_orders=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_all(price=price_wide, log=True)
_ = from_signals_all(price=price_wide, log=True, max_logs=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
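# from_holding is plain buy-and-hold: it should match from_signals with a permanent
# entry signal, no exits and no accumulation.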
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
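# from_random_signals must be reproducible under a fixed seed; the tests pin the exact
# signals the seed produces and compare the result against an explicit from_signals call.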
class TestFromRandom:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='rand_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
)
# ############# from_orders ############# #
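# Shared fixtures: order_size requests the largest possible buy, then the largest
# possible sell, skips a bar (NaN) and repeats the pair; order_size_one places
# unit-sized orders in the same pattern.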
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_all(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)
def from_orders_longonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)
def from_orders_shortonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_all().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
(9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
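    # np.inf / -np.inf request the largest possible order in the column's direction; a
    # request the direction cannot accommodate on that bar (nothing to sell, nothing to
    # cover) simply produces no record, as asserted below.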
def test_size_inf(self):
record_arrays_close(
from_orders_all(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 198.01980198019803, 2.02, 0.0, 1),
(2, 3, 0, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 1),
(2, 3, 0, 49.504950495049506, 4.04, 0.0, 0), (3, 4, 0, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 3, 1, 1.0, 4.0, 0.4, 1), (7, 4, 1, 1.0, 5.0, 0.5, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 2.0, 0), (10, 3, 2, 1.0, 4.0, 4.0, 1), (11, 4, 2, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 3, 1, 1.0, 4.0, 0.1, 1), (7, 4, 1, 1.0, 5.0, 0.1, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 1.0, 0), (10, 3, 2, 1.0, 4.0, 1.0, 1), (11, 4, 2, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_all(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 3, 1, 1.0, 3.6, 0.0, 1), (7, 4, 1, 1.0, 5.5, 0.0, 0), (8, 0, 2, 1.0, 0.0, 0.0, 1),
(9, 1, 2, 1.0, 4.0, 0.0, 0), (10, 3, 2, 1.0, 0.0, 0.0, 1), (11, 4, 2, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0), (2, 3, 0, 0.5, 4.0, 0.0, 1),
(3, 4, 0, 0.5, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0), (8, 0, 2, 1.0, 1.0, 0.0, 1),
(9, 1, 2, 1.0, 2.0, 0.0, 0), (10, 3, 2, 1.0, 4.0, 0.0, 1), (11, 4, 2, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_all(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 3, 1, 1.0, 4.0, 0.0, 0),
(6, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 3, 1, 1.0, 4.0, 0.0, 0), (5, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 4, 1, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0), (4, 0, 1, 1000.0, 1.0, 0.0, 1), (5, 3, 1, 1000.0, 4.0, 0.0, 1),
(6, 4, 1, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 1, 0, 0, 0.0, 100.0, 2.0, 200.0, -np.inf, 0, 2, 2.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 2, 0, 0, 400.0, -100.0, 3.0, 100.0, np.nan, 0, 2, 3.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 3, 0, 0, 400.0, -100.0, 4.0, 0.0, np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 4, 0, 0, 0.0, 0.0, 5.0, 0.0, -np.inf, 0, 2, 5.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 1, 0, 200.0, 2.0, 0.0, 1),
(3, 3, 1, 200.0, 4.0, 0.0, 0), (4, 4, 1, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 3, 1, 100.0, 4.0, 0.0, 0),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_orders_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_target_shares(self):
record_arrays_close(
from_orders_all(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=75., size_type='targetshares',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_all(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 2, 1, 8.333333333333332, 3.0, 0.0, 0),
(8, 3, 1, 4.166666666666668, 4.0, 0.0, 0), (9, 4, 1, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 25.0, 2.0, 0.0, 0),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 0), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 0),
(4, 4, 0, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0),
(2, 1, 0, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 1, 2, 25.0, 2.0, 0.0, 0), (5, 2, 0, 8.333333333333332, 3.0, 0.0, 1),
(6, 2, 1, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 3, 0, 4.166666666666668, 4.0, 0.0, 1), (9, 3, 1, 4.166666666666668, 4.0, 0.0, 1),
(10, 3, 2, 4.166666666666668, 4.0, 0.0, 1), (11, 4, 0, 2.5, 5.0, 0.0, 1),
(12, 4, 1, 2.5, 5.0, 0.0, 1), (13, 4, 2, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 2, 1, 6.25, 3.0, 0.0, 0), (8, 3, 1, 2.34375, 4.0, 0.0, 0),
(9, 4, 1, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 37.5, 2.0, 0.0, 0), (2, 2, 0, 6.25, 3.0, 0.0, 0),
(3, 3, 0, 2.34375, 4.0, 0.0, 0), (4, 4, 0, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 0, 1, 2.50000000e+01, 1., 0., 0),
(2, 0, 2, 1.25000000e+01, 1., 0., 0), (3, 1, 0, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 1, 2, 7.81250000e-01, 2., 0., 0),
(6, 2, 0, 2.60416667e-01, 3., 0., 0), (7, 2, 1, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 3, 0, 2.44140625e-02, 4., 0., 0),
(10, 3, 1, 1.22070312e-02, 4., 0., 0), (11, 3, 2, 6.10351562e-03, 4., 0., 0),
(12, 4, 0, 2.44140625e-03, 5., 0., 0), (13, 4, 1, 1.22070312e-03, 5., 0., 0),
(14, 4, 2, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_all(price=price_wide)
_ = from_orders_all(price=price_wide, max_orders=9)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_all(price=price_wide, log=True)
_ = from_orders_all(price=price_wide, log=True, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, log=True, max_logs=14)
# ############# from_order_func ############# #
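# Order function used by the tests below: on even-indexed bars it buys `size`
# shares and on odd-indexed bars it sells `size` shares, always at that bar's
# close price; the log variant additionally writes a log record.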
@njit
def order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col])
@njit
def log_order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col], log=True)
class TestFromOrderFunc:
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_one_column(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price.tolist(), order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_multiple_columns(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (5, 0, 1, 100.0, 1.0, 0.0, 0),
(6, 1, 1, 200.0, 2.0, 0.0, 1), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_shape(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5,), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise,
keys=pd.Index(['first'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0, 1, 2], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise,
keys=pd.Index(['first', 'second', 'third'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first', 'second', 'third'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_group_by(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(8, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_cash_sharing(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 1, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 1, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 2, 200.0, 2.0, 0.0, 1),
(4, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(4, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (5, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def segment_prep_func_nb(sc, target_hold_value):
order_size = np.copy(target_hold_value[sc.i, sc.from_col:sc.to_col])
order_size_type = np.full(sc.group_len, SizeType.TargetValue)
direction = np.full(sc.group_len, Direction.All)
order_value_out = np.empty(sc.group_len, dtype=np.float_)
sc.last_val_price[sc.from_col:sc.to_col] = sc.close[sc.i, sc.from_col:sc.to_col]
nb.sort_call_seq_nb(sc, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(oc, order_size, order_size_type, direction):
col_i = oc.call_seq_now[oc.call_idx]
return nb.create_order_nb(
size=order_size[col_i],
size_type=order_size_type[col_i],
price=oc.close[oc.i, col_i],
direction=direction[col_i]
)
portfolio = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, segment_prep_func_nb=segment_prep_func_nb,
segment_prep_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_value(self, test_row_wise):
@njit
def target_val_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_val_order_func_nb(oc):
return nb.create_order_nb(size=50., size_type=SizeType.TargetValue, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
segment_prep_func_nb=target_val_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_percent(self, test_row_wise):
@njit
def target_pct_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_pct_order_func_nb(oc):
return nb.create_order_nb(size=0.5, size_type=SizeType.TargetPercent, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
segment_prep_func_nb=target_pct_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_init_cash(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=[1., 10., np.inf])
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 1.0, 0.0, 0),
(2, 0, 2, 10.0, 1.0, 0.0, 0), (3, 1, 0, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 1, 2, 10.0, 2.0, 0.0, 1),
(6, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 3, 0, 10.0, 4.0, 0.0, 1),
(10, 3, 1, 10.0, 4.0, 0.0, 1), (11, 3, 2, 10.0, 4.0, 0.0, 1),
(12, 4, 0, 8.0, 5.0, 0.0, 0), (13, 4, 1, 8.0, 5.0, 0.0, 0),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 2.0, 0.0, 1),
(2, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (3, 3, 0, 10.0, 4.0, 0.0, 1),
(4, 4, 0, 8.0, 5.0, 0.0, 0), (5, 0, 1, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 3, 1, 10.0, 4.0, 0.0, 1), (9, 4, 1, 8.0, 5.0, 0.0, 0),
(10, 0, 2, 10.0, 1.0, 0.0, 0), (11, 1, 2, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 3, 2, 10.0, 4.0, 0.0, 1),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(portfolio._init_cash) == np.ndarray
base_portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=np.inf)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.Auto)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.Auto
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.AutoAlign)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.AutoAlign
def test_func_calls(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def group_prep_func_nb(gc, call_i, group_lst):
call_i[0] += 1
group_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,)
)
assert call_i[0] == 28
assert list(sim_lst) == [1]
assert list(group_lst) == [2, 18]
assert list(segment_lst) == [3, 6, 9, 12, 15, 19, 21, 23, 25, 27]
assert list(order_lst) == [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 20, 22, 24, 26, 28]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, True],
[False, False],
[False, True],
[False, False],
[False, True],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask
)
assert call_i[0] == 8
assert list(sim_lst) == [1]
assert list(group_lst) == [2]
assert list(segment_lst) == [3, 5, 7]
assert list(order_lst) == [4, 6, 8]
def test_func_calls_row_wise(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def row_prep_func_nb(gc, call_i, row_lst):
call_i[0] += 1
row_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
row_wise=True
)
assert call_i[0] == 31
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 8, 14, 20, 26]
assert list(segment_lst) == [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]
assert list(order_lst) == [4, 5, 7, 10, 11, 13, 16, 17, 19, 22, 23, 25, 28, 29, 31]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask,
row_wise=True
)
assert call_i[0] == 14
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 5, 9]
assert list(segment_lst) == [3, 6, 10, 13]
assert list(order_lst) == [4, 7, 8, 11, 12, 14]
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_orders(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=14)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_logs(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=14)
# ############# Portfolio ############# #
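# Shared fixtures for the Portfolio tests below: the same order sizes are simulated
# on prices containing NaNs, with a different direction per column, once ungrouped
# (independent), once grouped without cash sharing, and once grouped with cash sharing.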
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'all']
group_by = pd.Index(['first', 'first', 'second'], name='group')
portfolio = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D'
) # independent
portfolio_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D'
) # grouped
portfolio_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D'
) # shared
class TestPortfolio:
def test_config(self, tmp_path):
assert vbt.Portfolio.loads(portfolio['a'].dumps()) == portfolio['a']
assert vbt.Portfolio.loads(portfolio.dumps()) == portfolio
portfolio.save(tmp_path / 'portfolio')
assert vbt.Portfolio.load(tmp_path / 'portfolio') == portfolio
def test_wrapper(self):
pd.testing.assert_index_equal(
portfolio.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
price_na.columns
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.grouper.group_by is None
assert portfolio.wrapper.grouper.allow_enable
assert portfolio.wrapper.grouper.allow_disable
assert portfolio.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.columns,
price_na.columns
)
assert portfolio_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.grouper.group_by,
group_by
)
assert portfolio_grouped.wrapper.grouper.allow_enable
assert portfolio_grouped.wrapper.grouper.allow_disable
assert portfolio_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_shared.wrapper.columns,
price_na.columns
)
assert portfolio_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_shared.wrapper.grouper.group_by,
group_by
)
assert not portfolio_shared.wrapper.grouper.allow_enable
assert portfolio_shared.wrapper.grouper.allow_disable
assert not portfolio_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert portfolio['a'].wrapper == portfolio.wrapper['a']
assert portfolio['a'].orders == portfolio.orders['a']
assert portfolio['a'].logs == portfolio.logs['a']
assert portfolio['a'].init_cash == portfolio.init_cash['a']
pd.testing.assert_series_equal(portfolio['a'].call_seq, portfolio.call_seq['a'])
assert portfolio['c'].wrapper == portfolio.wrapper['c']
assert portfolio['c'].orders == portfolio.orders['c']
assert portfolio['c'].logs == portfolio.logs['c']
assert portfolio['c'].init_cash == portfolio.init_cash['c']
pd.testing.assert_series_equal(portfolio['c'].call_seq, portfolio.call_seq['c'])
assert portfolio[['c']].wrapper == portfolio.wrapper[['c']]
assert portfolio[['c']].orders == portfolio.orders[['c']]
assert portfolio[['c']].logs == portfolio.logs[['c']]
pd.testing.assert_series_equal(portfolio[['c']].init_cash, portfolio.init_cash[['c']])
pd.testing.assert_frame_equal(portfolio[['c']].call_seq, portfolio.call_seq[['c']])
assert portfolio_grouped['first'].wrapper == portfolio_grouped.wrapper['first']
assert portfolio_grouped['first'].orders == portfolio_grouped.orders['first']
assert portfolio_grouped['first'].logs == portfolio_grouped.logs['first']
assert portfolio_grouped['first'].init_cash == portfolio_grouped.init_cash['first']
pd.testing.assert_frame_equal(portfolio_grouped['first'].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped[['first']].wrapper == portfolio_grouped.wrapper[['first']]
assert portfolio_grouped[['first']].orders == portfolio_grouped.orders[['first']]
assert portfolio_grouped[['first']].logs == portfolio_grouped.logs[['first']]
pd.testing.assert_series_equal(
portfolio_grouped[['first']].init_cash,
portfolio_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_grouped[['first']].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped['second'].wrapper == portfolio_grouped.wrapper['second']
assert portfolio_grouped['second'].orders == portfolio_grouped.orders['second']
assert portfolio_grouped['second'].logs == portfolio_grouped.logs['second']
assert portfolio_grouped['second'].init_cash == portfolio_grouped.init_cash['second']
pd.testing.assert_series_equal(portfolio_grouped['second'].call_seq, portfolio_grouped.call_seq['c'])
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].wrapper == portfolio_grouped.wrapper[['second']]
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].logs == portfolio_grouped.logs[['second']]
pd.testing.assert_series_equal(
portfolio_grouped[['second']].init_cash,
portfolio_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_grouped[['second']].call_seq, portfolio_grouped.call_seq[['c']])
assert portfolio_shared['first'].wrapper == portfolio_shared.wrapper['first']
assert portfolio_shared['first'].orders == portfolio_shared.orders['first']
assert portfolio_shared['first'].logs == portfolio_shared.logs['first']
assert portfolio_shared['first'].init_cash == portfolio_shared.init_cash['first']
pd.testing.assert_frame_equal(portfolio_shared['first'].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].wrapper == portfolio_shared.wrapper[['first']]
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].logs == portfolio_shared.logs[['first']]
pd.testing.assert_series_equal(
portfolio_shared[['first']].init_cash,
portfolio_shared.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_shared[['first']].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared['second'].wrapper == portfolio_shared.wrapper['second']
assert portfolio_shared['second'].orders == portfolio_shared.orders['second']
assert portfolio_shared['second'].logs == portfolio_shared.logs['second']
assert portfolio_shared['second'].init_cash == portfolio_shared.init_cash['second']
pd.testing.assert_series_equal(portfolio_shared['second'].call_seq, portfolio_shared.call_seq['c'])
assert portfolio_shared[['second']].wrapper == portfolio_shared.wrapper[['second']]
assert portfolio_shared[['second']].orders == portfolio_shared.orders[['second']]
assert portfolio_shared[['second']].logs == portfolio_shared.logs[['second']]
pd.testing.assert_series_equal(
portfolio_shared[['second']].init_cash,
portfolio_shared.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_shared[['second']].call_seq, portfolio_shared.call_seq[['c']])
def test_regroup(self):
assert portfolio.regroup(None) == portfolio
assert portfolio.regroup(False) == portfolio
assert portfolio.regroup(group_by) != portfolio
pd.testing.assert_index_equal(portfolio.regroup(group_by).wrapper.grouper.group_by, group_by)
assert portfolio_grouped.regroup(None) == portfolio_grouped
assert portfolio_grouped.regroup(False) != portfolio_grouped
assert portfolio_grouped.regroup(False).wrapper.grouper.group_by is None
assert portfolio_grouped.regroup(group_by) == portfolio_grouped
assert portfolio_shared.regroup(None) == portfolio_shared
with pytest.raises(Exception) as e_info:
_ = portfolio_shared.regroup(False)
assert portfolio_shared.regroup(group_by) == portfolio_shared
def test_cash_sharing(self):
assert not portfolio.cash_sharing
assert not portfolio_grouped.cash_sharing
assert portfolio_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
portfolio.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_incl_unrealized(self):
assert not vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=False).incl_unrealized
assert vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=True).incl_unrealized
def test_orders(self):
record_arrays_close(
portfolio.orders.values,
np.array([
(0, 1, 0, 0.1, 2.02, 0.10202, 0), (1, 2, 0, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 4, 0, 1.0, 5.05, 0.1505, 0), (3, 0, 1, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 3, 1, 0.1, 4.04, 0.10404000000000001, 0),
(6, 4, 1, 1.0, 4.95, 0.14950000000000002, 1), (7, 0, 2, 1.0, 1.01, 0.1101, 0),
(8, 1, 2, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 3, 2, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
portfolio.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, np.nan, 100.0, 1.0, 0, 0, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.0, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 1, 0, 0, 100.0, 0.0, 2.0, 100.0, 0.1, 0, 0, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.69598, 0.1, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 2, 0, 0, 99.69598, 0.1, 3.0, 99.99598, -1.0, 0, 0, 3.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 3, 0, 0, 99.89001, 0.0, 4.0, 99.89001, -0.1, 0, 0, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 4, 0, 0, 99.89001, 0.0, 5.0, 99.89001, 1.0, 0, 0, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 94.68951, 1.0, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 0, 1, 1, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 1, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.8801, -1.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 2.0, 98.8801, 0.1, 0, 1, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 2, 1, 1, 100.97612, -1.1, np.nan, np.nan, -1.0, 0, 1, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 3, 1, 1, 100.97612, -1.1, 4.0, 96.57611999999999, -0.1, 0, 1, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.46808, -1.0, 0.1, 4.04, 0.10404000000000001, 0, 0, -1, 5),
(9, 4, 1, 1, 100.46808, -1.0, 5.0, 95.46808, 1.0, 0, 1, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 105.26858, -2.0, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 0, 2, 2, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 2, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.8799, 1.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 1, 2, 2, 98.8799, 1.0, 2.0, 100.8799, 0.1, 0, 2, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.57588000000001, 1.1, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 3.0, 101.87588000000001, -1.0, 0, 2, 3.0,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True, 101.41618000000001,
0.10000000000000009, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 3, 2, 2, 101.41618000000001, 0.10000000000000009, 4.0, 101.81618000000002,
-0.1, 0, 2, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True,
101.70822000000001, 0.0, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 4, 2, 2, 101.70822000000001, 0.0, np.nan, 101.70822000000001, 1.0, 0, 2, np.nan, 0.01, 0.1, 0.01,
1e-08, np.inf, 0.0, True, False, True, 101.70822000000001, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.logs.count(),
result
)
def test_trades(self):
record_arrays_close(
portfolio.trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.trades.count(),
result
)
def test_positions(self):
record_arrays_close(
portfolio.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1)
], dtype=position_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
portfolio.drawdowns.values,
np.array([
(0, 0, 0, 4, 4, 0), (1, 1, 0, 4, 4, 0), (2, 2, 2, 3, 4, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(portfolio.close, price_na)
pd.testing.assert_frame_equal(portfolio_grouped.close, price_na)
pd.testing.assert_frame_equal(portfolio_shared.close, price_na)
def test_fill_close(self):
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=False),
price_na
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=False),
price_na.ffill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=True),
price_na.bfill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=True),
price_na.ffill().bfill()
)
def test_share_flow(self):
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.share_flow(),
result
)
def test_shares(self):
pd.testing.assert_frame_equal(
portfolio.shares(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.shares(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.shares(),
result
)
def test_pos_mask(self):
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(),
result
)
def test_pos_coverage(self):
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('pos_coverage')
)
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('pos_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
portfolio.cash_flow(short_cash=False),
pd.DataFrame(
np.array([
[0., -1.0999, -1.1201],
[-0.30402, -0.29998, -0.30402],
[0.19403, 0., 2.8403],
[0., 0.29996, 0.29204],
[-5.2005, -5.0995, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
portfolio.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
portfolio_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index= | pd.Index(['first', 'second'], dtype='object', name='group') | pandas.Index |
import pandas as pd
import os
import warnings
import pickle
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from collections import namedtuple
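# A fact is identified by its table row UID, its concatenated text, and the table file it came from.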
Fact = namedtuple("Fact", "uid fact file")
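# Multiple-choice answer letters mapped to zero-based option indices.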
answer_key_map = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5}
tables_dir = "annotation/expl-tablestore-export-2017-08-25-230344/tables/"
stopwords = stopwords.words('english')  # note: rebinds the name, shadowing the imported nltk corpus module
tokenizer = RegexpTokenizer(r'\w+')
# Lemmatization map
lemmatization = {}
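# Each tab-separated line maps an inflected form (second field) back to its lemma (first field).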
with open('annotation/lemmatization-en.txt', 'r') as f:
for line in f:
l0 = line.strip().split('\t')
lemmatization[l0[1]] = l0[0]
print(f"len(lemmatization): {len(lemmatization)}")
######################
# FACT AS NODE GRAPH #
######################
# Map from "words" to facts containing the "words"
graph_word_to_fact_map = {}
fact_base = {}
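# Walk every table file: "[SKIP]" columns are metadata (only the UID column is used),
# "[FILL]" columns contribute text to the fact string only, and all remaining columns
# feed both the fact string and the graph header.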
for path, _, files in os.walk(tables_dir):
for f in files:
print(".", end="")
df = pd.read_csv(os.path.join(path, f), sep='\t')
uid = None
header = []
graph_header = []
check_skip_dep = False
# if "[SKIP] DEP" in df.columns:
# check_skip_dep = True
for name in df.columns:
if name.startswith("[SKIP]"):
if 'UID' in name:
if uid is None:
uid = name
else:
raise AttributeError('Possibly misformatted file: ' + path)
elif name.startswith("[FILL]"):
header.append(name)
else:
graph_header.append(name)
header.append(name)
if not uid or len(df) == 0:
warnings.warn('Possibly misformatted file: ' + f)
continue
for _, row in df.iterrows():
row_uid = row[uid]
# if check_skip_dep and not pd.isna(row["[SKIP] DEP"]):
# skip deprecated row
# continue
if row_uid in fact_base:
print(f"repeated UID {row_uid} in file {f}")
continue
            fact_base[row_uid] = Fact(row_uid, ' '.join(str(s) for s in list(row[header]) if not pd.isna(s)), f)
import streamlit as st
import streamlit.components.v1 as stc
import time
from random import random
import numpy as np
import pandas as pd
import altair as alt
from altair import Chart, X, Y, Axis, SortField, OpacityValue
# 2020-10-25 edit@ from st.annotated_text import annotated_text
from annotated_text import annotated_text
import st_state
def main():
st.beta_set_page_config(
page_title="AB Testing", # String or None. Strings get appended with "• Streamlit".
page_icon="🎲", # String, anything supported by st.image, or None.
layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto") # Can be "auto", "expanded", "collapsed"
# load state object
state = st_state._get_state()
# ==================== Nav Bar ==================== #
if state.nav is None: state.nav = 0
nav = state.nav
part1, part2, part3 = st.beta_columns([1, 1, 1])
pages = ['⚪ Part I: Probability ',
'⚪ Part II: Error ',
'⚪ Part III: P-values ']
pages[nav] = '🔴 ' + pages[nav][2:]
with part1:
if st.button(pages[0]): state.nav = 0
with part2:
if st.button(pages[1]): state.nav = 1
with part3:
if st.button(pages[2]): state.nav = 2
st.markdown('---')
if nav == 0: ############ PART I ############
st.header('👩🔬 Exploring Intuitions Around AB Testing')
        st.write('In AB testing we want to know how often an event occurs, and compare it against a competing design. In practice, however, we can only observe the measured outcome of an event, not the true conversion rate behind it.')
st.write('For example, we may observe that 2/10 visitors click a button. So how many clicks would we expect if we had 100 visitors? By generating random numbers we can simulate behaviour on our website.')
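        # Illustrative sketch of the simulation idea described above (not part of the
        # app's code path; it assumes the conversion_rate and n_samples inputs defined
        # just below): each simulated visitor "clicks" when a uniform random draw falls
        # below the true rate, e.g.
        #   clicks = sum(random() < conversion_rate for _ in range(n_samples))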
st.header('🎲 Random Click Generator')
conversion_rate = st.number_input('True Conversion Rate', value=0.2)
n_samples = st.number_input('Sample size (people)', value=100)
# ============== Setup placeholder chart =============== #
res = []
df = pd.DataFrame()
        df['A'] = pd.Series(res)
import os
import pandas as pd
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
def open_csv(filepath, header_names=None):
"""Opens CSV file with option to add header names."""
if header_names and hasattr(header_names, "__iter__"):
        return pd.read_csv(filepath, sep=",", header=0, names=header_names)
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a strategy tester
#pandas_datareader is deprecated, use YahooGrabber
#Import modules
from pandas_datareader import data
import pandas as pd
import numpy as np
#Assign ticker
ticker = '^GSPC'
#Request data
s = data.DataReader(ticker, 'yahoo', start='01/01/2016', end='01/01/2050')
#Calculate log returns
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
#Iterable
s['Ranger'] = range(len(s))
#Create dataframe
k = pd.DataFrame(index = s['Ranger'])
#Empty list
AroonUp = []
AroonDown = []
AroonDate = []
#Variable assignment
#Time factor
tf = 7
AdjClose = s['Adj Close'].tolist()
AdjCloseSeries = pd.Series(AdjClose)
k['Adj Close'] = AdjCloseSeries
Date = s['Ranger'].tolist()
counter = tf
#Calculate Aroon indicator
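#As implemented here: Aroon Up/Down = 100 * (index of the max/min close within the
#trailing tf-bar window) / tf, so values near 100 mean the extreme occurred recently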
while counter < len(s):
Aroon_Up = ((k['Adj Close'][counter-tf:counter].tolist().index(max
(k['Adj Close'][counter-tf:counter])))/float(tf)*100)
Aroon_Down = ((k['Adj Close'][counter-tf:counter].tolist().index(min
(k['Adj Close'][counter-tf:counter])))/float(tf)*100)
AroonUp.append(Aroon_Up)
AroonDown.append(Aroon_Down)
AroonDate.append(Date[counter])
counter = counter + 1
s = s[tf:]
#List to series
AroonUpSeries = pd.Series(AroonUp, index=s.index)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pytest
from pandas.compat import lrange, range
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
def test_get():
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan():
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = pd.Float64Index(range(10)).to_series()
idx = [2, 30]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert_series_equal(s.get(idx),
Series([2, np.nan], index=idx))
idx = [2, np.nan]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert_series_equal(s.get(idx),
Series([2, np.nan], index=idx))
# GH 17295 - all missing keys
idx = [20, 30]
assert(s.get(idx) is None)
idx = [np.nan, np.nan]
assert(s.get(idx) is None)
def test_delitem():
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
with pytest.raises(KeyError):
del s[0]
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_slice_float64():
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
    tm.assert_frame_equal(result, expected)
import argparse
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def parse_args(args):
"""define arguments"""
parser = argparse.ArgumentParser(description="TATA_enrichment_plots")
parser.add_argument(
"file_names",
type=str,
help="Name of folder and filenames for the promoters extracted",
)
parser.add_argument(
"gat_TATA_constitutive_output",
type=str,
help="Location of constitutive promoter gat analysis output",
)
parser.add_argument(
"gat_TATA_variable_output",
type=str,
help="Location of variable promoter gat analysis output",
)
parser.add_argument(
"gat_TATA_nonspecific_output",
type=str,
help="Location of non-specific promoter gat analysis output",
)
parser.add_argument(
"gat_TATA_tissue_specific_output",
type=str,
help="Location of tissue_specific promoter gat analysis output",
)
parser.add_argument(
"output_prefix",
type=str,
help="Output prefix to add to plot file name",
)
parser.add_argument(
"output_folder_name",
type=str,
help="Optional output folder name ending in a forward slash",
default="",
nargs="?",
)
parser.add_argument(
"palette_cv",
type=str,
help="Optional replacement colour palette for cv",
default=None,
nargs="?",
)
parser.add_argument(
"palette_tau",
type=str,
help="Optional replacement colour palette for tau",
default=None,
nargs="?",
)
return parser.parse_args(
args
) # let argparse grab args from sys.argv itself to allow for testing in module import
def create_plot(
gat_TATA_constitutive_output,
gat_TATA_variable_output,
gat_TATA_nonspecific_output,
gat_TATA_tissuespecific_output,
output_prefix,
palette_cv,
palette_tau,
output_folder_name,
file_names,
):
"""import and process the raw outputs after running gat (Genomic association tester). Then create barplot of constitutive and variable gene TATA enrichment"""
# import gat output files as dfs
def merge_gatfiles(gat_output1, gat_output2, palette):
df1 = pd.read_table(gat_output1, sep="\t", header=0)
        df2 = pd.read_table(gat_output2, sep="\t", header=0)
"""
PFRA Module for working with HEC-RAS model output files
"""
import gdal
from time import time
import geopandas as gpd
from geopandas.tools import sjoin
from shapely.ops import cascaded_union
from shapely.geometry import Point, LineString, Polygon
import numpy as np
import pandas as pd
import h5py
from matplotlib import pyplot as plt
from hecrasio.core import ResultsZip
from io import BytesIO
import boto3
import rasterio
from rasterio.plot import show
import pathlib as pl
import os
import shutil
from collections import ChainMap
import json
# Add additional keys as needed
GEOMETRY_ATTRIBUTES = '/Geometry/2D Flow Areas/Attributes'
GEOMETRY_2DFLOW_AREA = '/Geometry/2D Flow Areas'
PLAN_DATA = '/Plan Data'
EVENT_DATA_BC = '/Event Conditions/Unsteady/Boundary Conditions'
UNSTEADY_SUMMARY = '/Results/Unsteady/Summary'
TSERIES_RESULTS_2DFLOW_AREA = '/Results/Unsteady/Output/Output Blocks/Base Output/Unsteady Time Series/2D Flow Areas'
class PFRAError:
"""
Generic Error Class for PFRA
"""
def __init__(self, error):
self.Error = error
class HDFResultsFile:
"""
HEC-RAS HDF Plan File Object to compute flow data at breaklines.
Some functionality may be useful for other ras objects.
"""
def __init__(self, model:ResultsZip, model_path:str, path:str):
self.__model = model
if '.zip' in model_path:
self.__zip_path = path
else:
self.__path = path
        def decoder(x):
            """
            Decode bytes objects read from the hdf file.
            :param x: value to decode
            :return: decoded string, or x unchanged if it is not bytes
            """
if isinstance(x, bytes):
return x.decode()
else:
return x
def local_hdf():
"""
Add Description
:return:
"""
try:
self.__model.zipfile.extract(self.__zip_path)
return h5py.File(self.__zip_path, 'r')
except:
return h5py.File(self.__path, 'r')
def get_2dFlowArea_data():
"""
Add Description
:return:
"""
table_data = self._hdfLocal[GEOMETRY_ATTRIBUTES]
names = table_data.dtype.names
domain_data = {}
# Use [1:-1] to pull the name from the 0 element (row[0])
for row in table_data:
domain_data[row[0].decode()] = list(row)[1:-1]
return pd.DataFrame(domain_data, index=names[1:-1])
def get_planData(table):
"""
Add Description
:param table:
:return:
"""
table_data = self._hdfLocal['{}/{}'.format(PLAN_DATA, table)].attrs
values = [table_data[n] for n in list(table_data.keys())]
# Add wrapper here?
values = [v[0] if isinstance(v, list) else v for v in values]
values = [v.decode() if isinstance(v, bytes) else v for v in values]
return pd.DataFrame(data=values, index=list(table_data.keys()), columns=['Results'])
def get_geometry_data(table, domain):
"""Read in data from results tables"""
data = '{}/{}/{}'.format(GEOMETRY_2DFLOW_AREA, domain, table)
return np.array(self._plan_data[data])
def get_perimeter(domain):
"""Creates a perimeter polygon from points"""
d_array = get_geometry_data('Perimeter', domain)
aoi = Polygon([tuple(p) for p in d_array])
return gpd.GeoDataFrame(geometry=gpd.GeoSeries(aoi))
def get_domain_geometries():
domains = self._domains
if len(domains) > 1:
poly_list = [get_perimeter(domain) for domain in domains]
df = pd.concat(poly_list).reset_index(level=0, drop=True)
return gpd.GeoDataFrame(df)
else:
print('Single domain found...')
pass
def get_2dSummary():
"""Add Description"""
try:
table_data = self._hdfLocal[UNSTEADY_SUMMARY].attrs
values = [table_data[n] for n in list(table_data.keys())]
values = [v.decode() if isinstance(v, bytes) else v for v in values]
values = [str(v) if isinstance(v, list) else v for v in values]
return pd.DataFrame(data=values, index=list(table_data.keys()), columns=['Results'])
except KeyError as e:
print('You do not seem to have a summary table...')
print('Exiting.')
self._hdfLocal = local_hdf()
self._plan_data = self._hdfLocal
self._Plan_Information = get_planData('Plan Information')
self._Plan_Parameters = get_planData('Plan Parameters')
self._2dFlowArea = get_2dFlowArea_data()
self._domains = self._2dFlowArea.columns.tolist()
self._domain_polys = get_domain_geometries()
self._summary = get_2dSummary()
# Getter functions
@property
def hdfLocal(self):
"""Add Description"""
return self._hdfLocal
@property
def domains(self):
"""Add Description"""
return self._domains
@property
def domain_polys(self):
"""Domain Polygons"""
return self._domain_polys
@property
def Plan_Information(self):
"""Add Description"""
return self._Plan_Information
@property
def Plan_Parameters(self):
"""Add Description"""
return self._Plan_Parameters
@property
def summary(self):
"""Add Description"""
return self._summary
@property
def get_2dFlowArea(self):
"""Add Description"""
return self._2dFlowArea
class DomainResults:
"""
HEC-RAS HDF Plan File Object to compute flow data at breaklines.
Some functionality may be useful for other ras objects.
"""
def __init__(self, model: ResultsZip, plan: HDFResultsFile, domain: str):
# Specify Domain to instantiate Object
self.__model = model
self._plan = plan
self._domain = domain
self._plan_data = self._plan.hdfLocal
def get_domain_cell_size():
"""Identifies mean cell size for a domain"""
flowData = self._plan.get_2dFlowArea.copy()
flowData = flowData[self._domain]
xspacing = flowData.loc['Spacing dx']
yspacing = flowData.loc['Spacing dy']
return np.mean([xspacing, yspacing])
def get_tseries_results(table):
"""Read in data from results tables as a Pandas DataFrame"""
try:
data = '{}/{}/{}'.format(TSERIES_RESULTS_2DFLOW_AREA, self._domain, table)
d_array = np.array(self._plan_data[data]).T
return pd.DataFrame(d_array)
except:
print('{} is missing from the HDF!'.format(table))
def get_tseries_forcing(table):
"""This table is not domain specific"""
group = list(self._plan_data['{}/{}'.format(EVENT_DATA_BC, table)])
table_data = {}
for g in group:
table_data[g] = np.array(self._plan_data['{}/{}/{}'.format(EVENT_DATA_BC, table, g)])
return table_data
def get_geometry_data(table):
"""Read in data from results tables"""
data = '{}/{}/{}'.format(GEOMETRY_2DFLOW_AREA, self._domain, table)
return np.array(self._plan_data[data])
def get_perimeter():
"""Creates a perimeter polygon from points"""
d_array = get_geometry_data('Perimeter')
aoi = Polygon([tuple(p) for p in d_array])
return gpd.GeoDataFrame(geometry=gpd.GeoSeries(aoi))
def get_face():
"""Returns GeoDataFrame with Faces per pair of Face Indices"""
gdf = gpd.GeoDataFrame(self._Faces_FacePoint_Indexes, columns=['from_idx', 'to_idx'])
gdf['face'] = gdf.apply(lambda row:
LineString([self._Face_FacePoints_Coordinate[row['from_idx']],
self._Face_FacePoints_Coordinate[row['to_idx']]]),
axis=1)
gdf['geometry'] = gdf['face']
gdf = gdf.drop(['from_idx', 'to_idx', 'face'], axis=1)
return gdf
def get_centroids():
"""Returns GeoDataFrame with Face centroids per pair of Face Indices"""
gdf = get_face()
gdf['face_cnt'] = gdf.apply(lambda row: row.geometry.centroid, axis=1)
gdf['geometry'] = gdf['face_cnt']
gdf = gdf.drop(['face_cnt'], axis=1)
return gdf
def describe_depth():
"""Calculate max, min, and range of depths for each cell center"""
# Pull in cell centroids and attribute them
cc_array = self._Cells_Center_Coordinate
cc_gdf = gpd.GeoDataFrame([Point([coord[0], coord[1]]) for coord in cc_array], columns=['geometry'])
depth_array = self._Depth
# Attribute cell centroids with depths
# NOT USED?
# cc_attr = pd.concat([cc_gdf, depth_array], axis=1)
# Obtain descriptive statistics for each centroid
max_attr = pd.DataFrame(depth_array.max(axis=1), columns=['max'])
max_gdf = pd.concat([cc_gdf, max_attr], axis=1)
max_gdf_nonzero = max_gdf[max_gdf['max'] != 0]
min_attr = pd.DataFrame(depth_array.min(axis=1), columns=['min'])
min_gdf = pd.concat([cc_gdf, min_attr], axis=1)
min_gdf_nonzero = min_gdf[min_gdf['min'] != 0]
return max_gdf_nonzero, min_gdf_nonzero
def get_avg_depth():
"""Calculates average depth at faces returning an array."""
depth_list = []
for (c1_idx, c2_idx) in self._Faces_Cell_Indexes:
# cat_depths = np.stack([self._Depth.loc[c1_idx], self._Depth.loc[c2_idx]])
cat_depths = np.stack([self._Depth[c1_idx, :], self._Depth[c2_idx, :]])
avg_face = np.average(cat_depths, axis=0)
depth_list.append(np.around(avg_face, decimals=2))
# np.stack use default axis=0
return pd.DataFrame(np.stack(depth_list))
def get_extreme_edge_depths():
"""Identifies Face Centroids with absolute, avgerage depths greater-than one foot"""
# Obtain boundary line
boundary_line = list(self._Perimeter['geometry'])[0].boundary
# Identify external faces
df = pd.DataFrame()
df['exterior'] = self._Faces.geometry.apply(lambda lstring: lstring.intersects(boundary_line))
# Identify minima
attr = pd.DataFrame(abs(self._Avg_Face_Depth).max(axis=1), columns=['abs_max'])
face_dp = pd.concat([self._Face_Centroid_Coordinates, attr], axis=1)
exterior_faces = face_dp[df['exterior'] == True]
return exterior_faces[exterior_faces['abs_max'] > 1]
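        # Note: the sjoin-based definition below rebinds the name and overrides the
        # buffer/intersects version above, so only this second implementation is used.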
def get_extreme_edge_depths():
"""Identifies Face Centroids with absolute, avgerage depths greater-than one foot"""
# Obtain boundary line
boundary_line = list(self._Perimeter['geometry'])[0].boundary
# Identify external faces
df = pd.DataFrame()
perimeter = gpd.GeoDataFrame(gpd.GeoSeries(boundary_line).to_frame(), geometry=0)
intersections = gpd.sjoin(perimeter, self._Faces, how="inner", op='intersects')
# Identify minima
attr = pd.DataFrame(abs(self._Avg_Face_Depth).max(axis=1), columns=['abs_max'])
face_dp = pd.concat([self._Face_Centroid_Coordinates, attr], axis=1)
exterior_faces = face_dp.loc[intersections['index_right']]
return exterior_faces[exterior_faces['abs_max'] > 1]
try:
self._StageBC = get_tseries_forcing('Stage Hydrographs')
except KeyError as e:
self._StageBC = None
try:
self._FlowBC = get_tseries_forcing('Flow Hydrographs')
except KeyError as e:
print(e)
self._FlowBC = None
try:
self._PrecipBC = get_tseries_forcing('Precipitation Hydrographs')
except KeyError as e:
print(e)
self._PrecipBC = None
self._CellSize = get_domain_cell_size()
self._Faces_FacePoint_Indexes = get_geometry_data('Faces FacePoint Indexes')
self._Face_FacePoints_Coordinate = get_geometry_data('FacePoints Coordinate')
self._Faces_Cell_Indexes = get_geometry_data('Faces Cell Indexes')
self._Face_Velocity = abs(get_tseries_results('Face Velocity'))
self._Face_Centroid_Coordinates = get_centroids()
self._Cells_Center_Coordinate = get_geometry_data('Cells Center Coordinate')
self._Depth = np.array(get_tseries_results('Depth'))
self._Describe_Depths = describe_depth()
self._Avg_Face_Depth = get_avg_depth()
self._Perimeter = get_perimeter()
self._Faces = get_face()
self._Extreme_Edges = get_extreme_edge_depths()
@property
def CellSize(self):
"""Domain mean cell size"""
print('Domain ID: {}, Average Cell Size = {}'.format(self._domain, self._CellSize))
return self._CellSize
@property
def StageBC(self):
"""Stage boundary conditions"""
return self._StageBC
@property
def FlowBC(self):
"""Flow boundary conditions"""
return self._FlowBC
@property
def PrecipBC(self):
"""Precipitation boundary conditions"""
return self._PrecipBC
@property
def Faces_FacePoint_Indexes(self):
"""Indices of face points used to create each Face"""
return self._Faces_FacePoint_Indexes
@property
def Face_FacePoints_Coordinate(self):
"""Coordinates of face points"""
return self._Face_FacePoints_Coordinate
@property
def Cells_Center_Coordinate(self):
"""Coordinates of cell centers"""
return self._Cells_Center_Coordinate
@property
def Faces(self):
"""Faces created from face point indecies and coordinates"""
return self._Faces
@property
def Face_Centroid_Coordinates(self):
"""Centroid of faces"""
return self._Face_Centroid_Coordinates
@property
def Faces_Cell_Indexes(self):
"""Indecies of cells bounded by each face"""
return self._Faces_Cell_Indexes
@property
def Face_Velocity(self):
"""Velocity measurements at each face"""
return self._Face_Velocity
@property
def Depth(self):
"""Depth measurements at each cell center"""
return self._Depth
@property
def Describe_Depths(self):
"""Max, min, and range of depths for each cell center"""
return self._Describe_Depths
@property
def Avg_Face_Depth(self):
"""Average depth of cell centers bounding a face"""
return self._Avg_Face_Depth
@property
def Perimeter(self):
"""Domain area polygon"""
return self._Perimeter
@property
def Extreme_Edges(self):
"""Perimeter face centroids with absolute, average depths greater than one"""
return self._Extreme_Edges
def find_anomalous_attributes(self, attr: str = 'Face_Velocity', threshold: int = 30):
"""
Returns attributed points with the maximum of their attributes exceeding a threshold
:param attr:
:param threshold:
:return:
"""
max_attr = pd.DataFrame(getattr(self, attr).max(axis=1), columns=['max'])
df_thresh = max_attr[max_attr['max'] > threshold]
gdf_thresh = self.Face_Centroid_Coordinates.iloc[df_thresh.index]
try:
return pd.concat([gdf_thresh, df_thresh], axis=1)
except ValueError as e:
print('No Anomolous Data Found')
return None
def count_anomalous_attributes(self, attr: str = 'Face_Velocity', threshold: int = 30):
"""
Returns attributed points with a count of their attributes exceeding a threshold
:param attr:
:param threshold:
:return:
"""
dseries = getattr(self, attr).apply(lambda row: sum(row > threshold), axis=1)
non_nan = dseries[dseries != 0].dropna()
df_non_nan = pd.DataFrame(non_nan, columns=['count'])
gdf_thresh = self.Face_Centroid_Coordinates.iloc[df_non_nan.index]
try:
return pd.concat([gdf_thresh, df_non_nan], axis=1)
except ValueError as e:
print('No Anomolous Data Found')
return None
# Functions ---------------------------------------------------------------------
def all_aoi_gdf(domain_results:list) -> gpd.geodataframe.GeoDataFrame:
"""
Creates a geodataframe containing polygons for all domains.
:param domain_results:
"""
perimeters = [domain.Perimeter for domain in domain_results]
df = pd.concat(perimeters).reset_index(drop=True)
return gpd.GeoDataFrame(df)
def group_excessive_points(gdf: gpd.geodataframe.GeoDataFrame, cell_size: float):
"""
Creates groupings of collocated points exceeding a threshold.
By default, a grouping is defined as three times the average
cell size of the input file.
:param gdf:
:param cell_size:
:return:
"""
gdf_aois = gpd.GeoDataFrame()
gdf_aois['point'] = gdf.geometry
gdf_aois['polygon'] = gdf_aois.point.apply(lambda row: row.buffer(cell_size * 3))
gdf_aois['geometry'] = gdf_aois['polygon']
try:
diss_aois = list(cascaded_union(gdf_aois.geometry))
gdf_diss_aois = gpd.GeoDataFrame(diss_aois, columns=['geometry'])
except:
diss_aois = cascaded_union(gdf_aois.geometry)
gdf_diss_aois = gpd.GeoDataFrame([diss_aois], columns=['geometry'])
return gdf_diss_aois
def subset_data(grouping_polys: gpd.geodataframe.GeoDataFrame, thresheld_gdf: gpd.geodataframe.GeoDataFrame,
count_gdf: gpd.geodataframe.GeoDataFrame, face_gdf: gpd.geodataframe.GeoDataFrame,
buff_distance: int = 100) -> [list, list, list]:
"""
Creates three lists of dataframes subset by a polygon where the polygon
is a grouping of centroids. The first list contains maximum values for
each face centroid, the second list contains counts of instances above
a threshold, and the third lists faces within the buffered bounding
box of a group of centroids.
:param grouping_polys:
:param thresheld_gdf:
:param count_gdf:
:param face_gdf:
:param buff_distance:
:return:
"""
subset_max_list, subset_count_list, subset_face_list = [], [], []
for i, poly in enumerate(grouping_polys.geometry):
subset_max = thresheld_gdf[thresheld_gdf.within(poly)]
subset_max_list.append(subset_max)
# NOT USED?
# subset_count = count_gdf.loc[subset_max.index]
subset_count_list.append(count_gdf.loc[subset_max.index])
x0, y0, x1, y1 = poly.buffer(buff_distance).bounds
bbox = Polygon([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
subset_faces = face_gdf[face_gdf.within(bbox)]
subset_face_list.append(subset_faces)
return subset_max_list, subset_count_list, subset_face_list
def find_large_and_small_groups(count_list: list, max_list: list, face_list: list,
gdf_groups: gpd.geodataframe.GeoDataFrame,
min_count: int = 5) -> [dict, dict]:
"""
Identifies large groupings, i.e. above minimum count, of points and
small groupings. Returns two dictionaries. One with large idxs,
maximums, counts, faces, and groups as well as one with small idxs,
maximums, and counts.
:param count_list:
:param max_list:
:param face_list:
:param gdf_groups:
:param min_count:
:return:
"""
large_dict, small_dict = {}, {}
large_tuples = [(i, count) for i, count in enumerate(count_list) if len(count) > min_count]
large_dict['idxs'] = [large_tuple[0] for large_tuple in large_tuples]
large_dict['maxes'] = [max_list[i] for i in large_dict['idxs']]
large_dict['counts'] = [large_tuple[1] for large_tuple in large_tuples]
large_dict['faces'] = [face_list[i] for i in large_dict['idxs']]
large_dict['groups'] = [gdf_groups.iloc[i] for i in large_dict['idxs']]
small_tuples = [(i, count) for i, count in enumerate(count_list) if len(count) <= min_count]
small_dict['idxs'] = [small_tuple[0] for small_tuple in small_tuples]
small_dict['maxes'] = [max_list[i] for i in small_dict['idxs']]
small_dict['counts'] = [small_tuple[1] for small_tuple in small_tuples]
return large_dict, small_dict
def velCheckMain(results, domain, plot_tseries=5):
"""
Add Description
:param results:
:param plot_tseries:
:param domain:
"""
# Identify face velocities above a given threshold
df_thresh = results.find_anomalous_attributes()
df_count = results.count_anomalous_attributes()
if df_count.shape[0] > 1 and df_thresh.shape[0] > 1:
# Identify groups of excessive centroids
gdf_groups = group_excessive_points(df_thresh, results.CellSize)
# Using a method nearly doubles the time
max_list, count_list, face_list = subset_data(gdf_groups, df_thresh, df_count, results.Faces)
# Split groups into large (n > 5) clusters vs. everything else
l_dict, s_dict = find_large_and_small_groups(count_list, max_list, face_list, gdf_groups)
# Identify group of interest
for idx in range(len(l_dict['groups'])):
plot_instabilities(l_dict['maxes'], l_dict['counts'], l_dict['faces'], results.Perimeter,
l_dict['groups'], idx)
# NOT USED?
maxes = l_dict['maxes'][idx]
# counts = l_dict['counts'][idx]
# faces = l_dict['faces'][idx]
# group = l_dict['groups'][idx]
max_vFaceIDs = list(maxes.sort_values(by='max', ascending=False)[0:plot_tseries].index)
# NOT USED?
# groupID = idx
depths = results.Avg_Face_Depth.iloc[max_vFaceIDs]
velocities = results.Face_Velocity.iloc[max_vFaceIDs]
for i in depths.index:
DepthVelPlot(depths.loc[i], velocities.loc[i], i)
try:
plot_disparate_instabilities(s_dict['maxes'], s_dict['counts'], results.Perimeter, domain)
except:
print('No disparate instabilities found. All instabilities must be grouped!')
return pd.DataFrame(data=[len(pd.concat(count_list)), max(pd.concat(max_list)['max'])],
columns=['Results'],
index=['Instability Count', 'Max Velocity'])
else:
        max_vel = results.Face_Velocity.values.max()
        print('No Velocity Errors Found in Domain {}'.format(domain))
        return pd.DataFrame(data=[0, max_vel],
                            columns=['Results'],
                            index=['Instability Count', 'Max Velocity'])
# Plotting Functions ------------------------------------------------------------
def show_results(domains:list, model, rasPlan, plot_tseries:int=3) -> None:
"""Wrapper function plotting descriptive statistics, extreme edges, boundary
conditions and velocity values.
"""
if len(domains) > 1:
results = {domain: DomainResults(model, rasPlan, domain) for domain in domains}
results_table = {}
for domain, result in results.items():
plot_descriptive_stats(result.Describe_Depths, result.Perimeter, domain)
plot_extreme_edges(result.Extreme_Edges, result.Perimeter, mini_map=rasPlan.domain_polys)
plotBCs(result, domain)
results_table[domain] = velCheckMain(result, domain, plot_tseries)
instability_count = sum([value.loc['Instability Count'] for value in list(results_table.values())])[0]
max_velocity = max([value.loc['Max Velocity'].values[0] for value in list(results_table.values())])
return pd.DataFrame(data=[instability_count, max_velocity],
columns=['Results'],
index=['Instability Count', 'Max Velocity'])
else:
domain = domains[0]
result = DomainResults(model, rasPlan, domain)
plot_descriptive_stats(result.Describe_Depths, result.Perimeter, domain)
plot_extreme_edges(result.Extreme_Edges, result.Perimeter)
plotBCs(result, domain)
return velCheckMain(result, domain, plot_tseries)
def plot_instabilities(max_list, count_list, gdf_face, gdf_face_all, ex_groups, idx):
"""
Add Description
:param max_list:
:param count_list:
:param gdf_face:
:param gdf_face_all:
:param ex_groups:
:param idx:
"""
fig, _ = plt.subplots(2, 2, figsize=(20, 8))
x0, y0, x1, y1 = ex_groups[idx].geometry.buffer(100).bounds
# Plot Max Velocities
ax1 = plt.subplot2grid((2, 2), (0, 0))
max_list[idx].plot(column='max', cmap='viridis', legend=True, ax=ax1)
gdf_face[idx].plot(alpha=0.1, color='black', ax=ax1)
ax1.set_title('Maximum Velocity recorded at Cell Face (ft/s)')
ax1.set_xlim(x0, x1)
ax1.set_ylim(y0, y1)
# Plot Number of instabilities recorded (timesteps above threshold)
ax2 = plt.subplot2grid((2, 2), (1, 0))
ax2 = count_list[idx].plot(column='count', cmap='viridis', legend=True, ax=ax2)
ax2 = gdf_face[idx].plot(alpha=0.1, color='black', ax=ax2)
ax2.set_title('Number of Instabilities recorded at Cell Face (n)')
ax2.set_xlim(x0, x1)
ax2.set_ylim(y0, y1)
# Plot Map Key (domain)
ax3 = plt.subplot2grid((2, 2), (0, 1), rowspan=2)
gdf_face_all.plot(alpha=0.05, color='black', ax=ax3)
pnt_group = gpd.GeoDataFrame(geometry=gpd.GeoSeries(ex_groups[idx].geometry.buffer(1000)))
pnt_group.plot(alpha=0.5, color='Red', legend=False, ax=ax3)
ax3.set_title('Map Legend')
ax1.axis('off')
ax2.axis('off')
ax3.axis('off')
fig.suptitle('Group {}'.format(idx + 1), fontsize=16, fontweight='bold')
def plot_disparate_instabilities(max_list, count_list, bounding_polygon, domain):
"""
Add Description
:param max_list:
:param count_list:
:param bounding_polygon:
:param domain:
"""
small_maxes = pd.concat(max_list)
small_counts = pd.concat(count_list)
fig, _ = plt.subplots(1, 2, figsize=(20, 8))
ax1 = plt.subplot2grid((1, 2), (0, 0))
small_maxes.plot(column='max', cmap='viridis', legend=True, ax=ax1)
bounding_polygon.plot(alpha=0.1, color='black', ax=ax1)
ax1.set_title('Maximum Velocity recorded at Cell Face (ft/s)')
ax2 = plt.subplot2grid((1, 2), (0, 1))
ax2 = small_counts.plot(column='count', cmap='viridis', legend=True, ax=ax2)
ax2 = bounding_polygon.plot(alpha=0.1, color='black', ax=ax2)
ax2.set_title('Number of Instabilities recorded at Cell Face (n)')
ax1.axis('off')
ax2.axis('off')
fig.suptitle('Isolated Points above Threshold for Domain {}'.format(domain), fontsize=16, fontweight='bold')
def plot_descriptive_stats(stat_lists: tuple, aoi: gpd.geodataframe.GeoDataFrame, domain:str) -> None:
"""
Plots the descriptive statistics (Max, Min) for
cell centers with the area of interest underneath.
:param stat_lists:
:param aoi:
"""
maximums, minimums = stat_lists
# Plot descriptive statistics
fig, (ax_string) = plt.subplots(1, 2, figsize=(20, 8))
ax1 = plt.subplot2grid((1, 2), (0, 0))
aoi.plot(color='k', alpha=0.25, ax=ax1)
maximums.plot(column='max', cmap='viridis', markersize=0.1, legend=True, ax=ax1)
ax1.set_title('Maximum Depth (ft)')
ax2 = plt.subplot2grid((1, 2), (0, 1))
aoi.plot(color='k', alpha=0.25, ax=ax2)
ax2 = minimums.plot(column='min', cmap='viridis', markersize=0.1, legend=True, ax=ax2, s=1)
ax2.set_title('Minimum Depth (ft)')
ax1.axis('off')
ax2.axis('off')
fig.suptitle('Depths at Cell Centers of Domain {}'.format(domain),
fontsize=16, fontweight='bold')
def plot_extreme_edges(gdf: gpd.geodataframe.GeoDataFrame,
aoi: gpd.geodataframe.GeoDataFrame,
**kwargs) -> None:
"""
Plots extreme depths along edges along with an overview map showing current
plotted domain versus all other domains.
:param gdf:
:param aoi:
:param \**kwargs:
See below
:Keyword Arguments:
* *mini_map* (gpd.geodataframe.GeoDataFrame) -- Multiple domain perimeters.
"""
if 'mini_map' in kwargs.keys():
mini_map = list(kwargs.values())[0]
fig, (ax_string) = plt.subplots(1, 2, figsize=(20, 8))
ax1 = plt.subplot2grid((1, 2), (0, 0))
aoi.plot(color='k', alpha=0.25, ax=ax1)
gdf.plot(column='abs_max', cmap='viridis', legend=True, ax=ax1, markersize=16)
ax1.set_title('Cell Locations with Depths > 1 ft\n(Check for Ponding)'.format(len(gdf)),
fontsize=12, fontweight='bold')
ax1.axis('off')
ax2 = plt.subplot2grid((1, 2), (0, 1))
mini_map.plot(color='#BFBFBF', edgecolor='k', ax=ax2, markersize=16)
aoi.plot(color='#FFC0CB', edgecolor='k', ax=ax2)
ax2.set_title('Current domain (pink) compared to all domains (grey)'.format(len(gdf)),
fontsize=12, fontweight='bold')
ax2.axis('off')
else:
fig, ax = plt.subplots(figsize = (7,7))
aoi.plot(color='k', alpha=0.25, ax=ax)
gdf.plot(column='abs_max', cmap='viridis', legend=True, ax=ax, markersize=16)
ax.set_title('Cell Locations with Depths > 1 ft\n(Check for Ponding)'.format(len(gdf)),
fontsize=12, fontweight='bold')
ax.axis('off')
def DepthVelPlot(depths: pd.Series, velocities: pd.Series, groupID: int, velThreshold: int = 30):
"""
Add Description
:param depths:
:param velocities:
:param groupID:
:param velThreshold:
"""
t = depths.index
data1 = depths
data2 = velocities
fig, ax1 = plt.subplots(figsize=(10, 2))
fig.suptitle('Velocity Anomalies at face {}'.format(groupID), fontsize=12, fontweight='bold', x=0.49, y=1.1)
color = 'blue'
ax1.set_xlabel('Time Steps')
ax1.set_ylabel('Depth (ft)', color=color)
ax1.plot(data1, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'red'
ax2.set_ylabel('Velocity (ft/s)', color=color) # we already handled the x-label with ax1
ax2.plot(data2, color=color, alpha=0.5)
ax2.tick_params(axis='y', labelcolor=color)
ax2.hlines(velThreshold, t.min(), t.max(), colors='k', linestyles='--', alpha=0.5, label='Threshold')
ax2.hlines(velThreshold * -1, t.min(), t.max(), colors='k', linestyles='--', alpha=0.5, label='Threshold')
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
def plotBCs(results, domain:str):
"""
Add Description
"""
if results.FlowBC is not None:
for k, v in results.FlowBC.items():
if domain in k:
fig, ax = plt.subplots(figsize=(20, 2))
ax.set_title('{}\nPeak Flow of {} cfs'.format(k, int(v[:, 1].max())))
ax.set_ylabel('Flow (ft)')
ax.set_xlabel('Days')
ax.plot(v[:, 0], v[:, 1])
ax.grid()
if results.StageBC is not None:
for k, v in results.StageBC.items():
if domain in k:
fig, ax = plt.subplots(figsize=(20, 2))
ax.set_title(k)
ax.set_ylabel('Stage (cfs)')
ax.set_xlabel('Days')
ax.plot(v[:, 0], v[:, 1])
ax.grid()
if results.PrecipBC is not None:
for k, v in results.PrecipBC.items():
if domain in k:
fig, ax = plt.subplots(figsize=(20, 2))
ax.set_title(k)
ax.set_ylabel('Precipitation (inches)')
ax.set_xlabel('Days')
ax.plot(v[:, 0], v[:, 1])
ax.grid()
def identify_unique_values(result_table:pd.core.frame.DataFrame,
desired_columns:list) -> pd.core.frame.DataFrame:
"""
Identifies unique values within a results table for a given attribute.
"""
df = pd.DataFrame(columns=['Unique_Values'])
    df['Result_Attribute'] = pd.Index(desired_columns)
import pandas as pd
import json
import os
import sys
import datetime
from datetime import time
from src.util import logger
def loadIntradayData(filepath):
    data = pd.read_csv(filepath, parse_dates=[0], names=['datetime', 'value'])
# -*- coding: utf-8 -*-
"""
@file:base_6900.py
@time:2019/7/6 21:49
@author:Tangj
@software:Pycharm
@Desc
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from lightgbm.sklearn import LGBMClassifier
from sklearn.metrics import mean_squared_error, mean_absolute_error, log_loss, accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from scipy import sparse
from scipy.stats import kurtosis
import time
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
'''
Approach: treat this as a 33-class classification problem.
The label is the number of days between the due date and the actual repayment date; possible values run from 0 to 31 days, with non-repayment defined as -1, giving 33 classes in total.
Predict the probability of each label, then multiply each probability by the amount due; this gives the expected amount repaid on each day.
Repayments with very small predicted probabilities can be zeroed out directly, since in practice a listing is repaid on a single day and the repaid amount is the full amount borrowed.
'''
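# Illustrative sketch (not part of the original pipeline) of how the 33-class
# probabilities turn into expected daily repayment amounts; the names are invented:
#   probs = model.predict_proba(X_test)            # shape (n_listings, 33)
#   repay_probs = probs[:, 1:]                     # drop the "no repayment" class
#   expected_amt = repay_probs * due_amt[:, None]  # expected repayment per day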
# Read the data and parse the relevant date columns as datetimes
train_data = pd.read_csv('../data/train.csv', parse_dates=['auditing_date', 'due_date', 'repay_date'])
train_data['repay_date'] = train_data[['due_date', 'repay_date']].apply(
lambda x: x['repay_date'] if x['repay_date'] != '\\N' else x['due_date'], axis=1
)
train_data['repay_amt'] = train_data['repay_amt'].apply(lambda x: x if x != '\\N' else 0).astype('float32')
train_data['label'] = (train_data['due_date'] - train_data['repay_date']).dt.days
train_data.loc[train_data['repay_amt'] == 0, 'label'] = -1
# Use the user_repay_logs file to expand the training set; order_id distinguishes the original train rows from the expanded ones
train_data.loc[:, 'order_id'] = 0
train_expand = pd.read_csv('../data/user_repay_logs.csv', parse_dates=['due_date', 'repay_date'])
train_expand = train_expand[train_expand['order_id'] == 1]
# del train_expand['order_id']
train_expand.loc[train_expand['repay_date'].dt.year == 2200, 'repay_amt'] = 0
train_expand['label'] = (train_expand['due_date'] - train_expand['repay_date']).dt.days
train_expand.loc[train_expand['repay_amt'] == 0, 'label'] = -1
train_data = pd.concat([train_data, train_expand])
train_data = train_data.drop_duplicates('listing_id').reset_index(drop=True)
# Some of the expanded training set is not from 2018, so only 2018 records are kept as the train set
mask = train_data['due_date'].dt.year == 2018
train_data = train_data[mask]
clf_labels = train_data['label'].values + 1
amt_labels = train_data['repay_amt'].values
del train_data['label'], train_data['repay_amt'], train_data['repay_date']
train_due_amt_data = train_data[['due_amt']]
train_num = train_data.shape[0]
# Processing of the test set
test_data = pd.read_csv('../data/test.csv', parse_dates=['auditing_date', 'due_date'])
sub = test_data[['listing_id', 'auditing_date', 'due_amt', 'due_date']]
data = pd.concat([train_data, test_data], axis=0, ignore_index=True)
listing_info_data = pd.read_csv('../data/listing_info.csv')
del listing_info_data['user_id'], listing_info_data['auditing_date']
data = data.merge(listing_info_data, on='listing_id', how='left')
# Merge in the user info; a few users have more than one record, so sort by date, deduplicate, and keep only the most recent record.
user_info_data = pd.read_csv('../data/user_info.csv', parse_dates=['reg_mon', 'insertdate'])
user_info_data.rename(columns={'insertdate': 'info_insert_date'}, inplace=True)
user_info_data_1 = user_info_data.sort_values(by='info_insert_date', ascending=False).drop_duplicates('user_id').reset_index(drop=True)  # sort by insert date descending, deduplicate, keep only the most recent record
user_info_data_1['foreign_land']=np.where(user_info_data_1['cell_province']==user_info_data_1['id_province'],'n','y')
modifyInfoNum=user_info_data.groupby('user_id').count()['info_insert_date'].to_frame().rename(columns={'info_insert_date':'modify_info_num'})
user_info_data_2=pd.merge(user_info_data_1,modifyInfoNum,how='left',on='user_id')
# Bucket the age field from the user info
def map_age(s):
if s < 25:
return 'Young'
elif s>24 and s < 36:
return 'Middle1'
elif s>35 and s < 51:
return 'Middle2'
else:
return 'Old'
user_info_data_2['map_age']=user_info_data_2['age'].map(map_age)
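# e.g. age 22 -> 'Young', 30 -> 'Middle1', 45 -> 'Middle2', 60 -> 'Old'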
data = data.merge(user_info_data_2, on='user_id', how='left')  # merge the basic user info table into the training data
# Add the user tag-list (profile) info; where a user has extra records, sort, deduplicate, and merge
user_tag_data = pd.read_csv('../data/user_taglist.csv', parse_dates=['insertdate'])
user_tag_data.rename(columns={'insertdate': 'tag_insert_date'}, inplace=True)
user_tag_data_1 = user_tag_data.sort_values(by='tag_insert_date', ascending=False).drop_duplicates('user_id').reset_index(drop=True)
modifyTagListNum = user_tag_data.groupby('user_id').count()['tag_insert_date'].to_frame().rename(columns={'tag_insert_date':'modify_taglist_num'})
user_tag_data_2=pd.merge(user_tag_data_1,modifyTagListNum,how='left',on='user_id')
data = data.merge(user_tag_data_2, on='user_id', how='left')
# User behavior table
user_behavior_logs = pd.read_csv('../data/user_behavior_logs.csv', parse_dates=['behavior_time'])
from ..parsers import get_parsing_function
import pandas as pd
from tqdm import tqdm
from ..DataStructures import reg_fixed_fileds
from ...settings import get_regions_batch_size
# Loading strategy
loading_strategy = "single_core"
batch = get_regions_batch_size()
def load_regions(collected_result):
# get the number of regions
# n_regions = collected_result.getNumberOfRegions()
# get the string delimiters
regions_delimiter = collected_result.REGIONS_DELIMITER()
values_delimiter = collected_result.VALUES_DELIMITER()
end_of_stream = collected_result.END_OF_STREAM()
# get how the strings are structured
names, types = get_schema(collected_result)
result = []
from ...settings import is_progress_enabled
if loading_strategy == 'single_core':
bar = tqdm(disable=not is_progress_enabled(), desc='Collecting regions')
region = collected_result.getRegionAsString(batch)
while region != end_of_stream:
region = region.split(regions_delimiter)
bar.update(len(region))
result.extend(map(lambda x: string_to_dictionary(x, values_delimiter, names, types), region))
region = collected_result.getRegionAsString(batch)
# # get the full string
# regions_string = collected_result.getRegionAsString(none())
# if regions_string:
# # convert to list of strings
# regions_string = regions_string.split(regions_delimiter)
# iterator = map(lambda x: string_to_dictionary(x, values_delimiter, names, types),
# tqdm(regions_string, disable=not is_progress_enabled()))
# result.extend(iterator)
# elif loading_strategy == 'multi_core':
# # number of divisions
# divisions = 10
# chunk_size = math.ceil(n_regions / divisions)
# p = pool.Pool(min(4, cpu_count()))
#
# std_partial = partial(string_to_dictionary, values_delimiter=values_delimiter,
# names=names, types=types)
#
# for _ in tqdm(range(divisions), disable=not is_progress_enabled()):
# # get the full string
# regions_string = collected_result.getRegionsAsString(Some(chunk_size))
# if regions_string:
# # convert to list of strings
# regions_string = regions_string.split(regions_delimiter)
#
# iterator = p.map(std_partial, regions_string)
# result.extend(iterator)
#
# p.close()
else:
raise ValueError("Unknown loading mode ({})".format(loading_strategy))
columns = reg_fixed_fileds + names
if len(result) > 0:
        df = pd.DataFrame.from_dict(result)
"""
Module for static data retrieval. These functions were performed once during the initial project creation. Resulting
data is now provided in bulk at the url above.
"""
import datetime
import json
from math import sin, cos, sqrt, atan2, radians
import re
import requests
import pandas as pd
from riverrunner import settings
from riverrunner.context import StationRiverDistance
from riverrunner.repository import Repository
def scrape_rivers_urls():
"""scrape river run data from Professor Paddle
generates URLs from the array of strings below. Each element represents a unique river. Each page is
requested with the entire HTML contents being saved to disk. The parsed river data is saved to 'data/rivers.csv'
"""
# copied from jquery selection in chrome dev tools on main prof paddle run table
river_links = pd.read_csv('riverrunner/data/static_river_urls.csv').columns.values
river_ids = [r[r.find("=")+1:] for r in river_links]
url = "http://www.professorpaddle.com/rivers/riverdetails.asp?riverid="
for id in river_ids:
r = requests.get(url + id)
if r.status_code == 200:
with open("river_%s.html" % id, 'w+') as f:
f.write(str(r.content))
rivers = []
for rid in river_ids:
with open('data/river_%s.html' % rid) as f:
river = f.readlines()
r = river[0]
row = {}
# title and river name
r = r[r.find('<font size="+2">'):]
run_name = r[r.find(">") + 1:r.find('<a')]
run_name = re.sub(r'<[^>]*>| ', ' ', run_name)
river_name = run_name[:run_name.find(' ')]
run_name = run_name[len(river_name):]
run_name = re.sub(r''', "'", run_name)
run_name = re.sub(r'—', "", run_name).strip()
row['run_name'] = re.sub(r'( )+', ' ', run_name)
row['river_name'] = river_name
# chunk off the class
r = r[r.find('Class'):]
rating = r[6:r.find('</strong>')]
row['class_rating'] = rating
# river length
r = r[r.find('<strong>')+8:]
length = r[:r.find("<")]
row['river_length'] = length
# zip code
r = r[r.find('Zip Code'):]
r = r[r.find('path')+6:]
row['zip'] = r[:r.find("<")]
# put in long
r = r[r.find("Put In Longitude"):]
r = r[r.find('path')+6:]
row['put_in_long'] = r[:r.find("<")]
# put in lat
r = r[r.find("Put In Latitude"):]
r = r[r.find('path')+6:]
row['put_in_lat'] = r[:r.find("<")]
# take out long
r = r[r.find("Take Out Longitude"):]
r = r[r.find('path')+6:]
row['take_out_long'] = r[:r.find("<")]
# take out lat
r = r[r.find("Take Out Latitude"):]
r = r[r.find('path')+6:]
row['take_out_lat'] = r[:r.find("<")]
# county
r = r[r.find("County"):]
r = r[r.find('path')+6:]
row['county'] = r[:r.find("<")]
# min level
r = r[r.find("Minimum Recomended Level"):]
r = r[r.find(" ")+6:]
row['min_level'] = r[:r.find("&")]
# min level units
r = r[r.find(';')+1:]
row['min_level_units'] = r[:r.find('&')]
# Maximum Recomended Level
r = r[r.find("Maximum Recomended Level"):]
r = r[r.find(" ")+6:]
row['max_level'] = r[:r.find("&")]
# max level units
r = r[r.find(';')+1:]
row['max_level_units'] = r[:r.find('&')]
row['id'] = rid
row['url'] = url + rid
rivers.append(row)
pd.DataFrame(rivers).to_csv('data/rivers.csv')
def parse_location_components(components, lat, lon):
"""parses location data from a Goggle address component list"""
location = {'latitude': lat, 'longitude': lon}
for component in components:
component_type = component['types']
if 'route' in component_type:
location['address'] = component['long_name']
elif 'locality' in component_type:
location['city'] = component['long_name']
elif 'administrative_area_level_2' in component_type:
location['route'] = re.sub(r'County', '', component['long_name'])
elif 'administrative_area_level_1' in component_type:
location['state'] = component['short_name']
elif 'postal_code' in component_type:
location['zip'] = component['long_name']
print(location)
return location
def parse_addresses_from_rivers():
"""parses river geolocation data and retrieves associated address information from Google geolocation services"""
df = pd.read_csv('data/rivers.csv').fillna('null')
addresses = []
# put in addresses
for name, group in df.groupby(['put_in_lat', 'put_in_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
# take out addresses
for name, group in df.groupby(['take_out_lat', 'take_out_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
if r.status_code == 200 and len(r.content) > 10:
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
pd.DataFrame(addresses).to_csv('data/addresses_takeout.csv', index=False)
def scrape_snowfall():
"""scrapes daily snowfall data from NOAA"""
base_url = 'https://www.ncdc.noaa.gov/snow-and-ice/daily-snow/WA-snow-depth-'
snowfall = []
for year in [2016, 2017, 2018]:
for month in range(1, 13):
for day in range(1, 32):
try:
date = '%s%02d%02d' % (year, month, day)
r = requests.get(base_url + date + '.json')
if r.status_code == 200 and len(r.content) > 0:
snf = json.loads(r.content)
for row in snf['rows']:
lat = row['c'][0]['v']
lon = row['c'][1]['v']
location_name = row['c'][2]['v'].strip().lower()
depth = row['c'][3]['v']
this_row = (datetime.datetime.strptime(str(date), '%Y%m%d').date(), lat, lon, location_name, depth)
snowfall.append(this_row)
print(this_row)
except Exception as e:
print([str(a) for a in e.args])
df = pd.DataFrame(snowfall)
df.columns = ['date', 'lat', 'lon', 'location_name', 'depth']
df.to_csv('data/snowfall.csv', index=None)
def parse_addresses_and_stations_from_snowfall():
"""iterate through snowfall geolocation data for associated station addresses"""
df = pd.read_csv('data/snowfall.csv')
addresses, stations = [], []
for name, group in df.groupby(['lat', 'lon']):
if name[0] == 0 or name[1] == 0:
continue
# parse address information
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
# parse station information
station = dict()
name = pd.unique(group.location_name)[0]
station['station_id'] = name[name.find('(') + 1:-1].strip().lower()
parts = name[:name.find(',')].split(' ')
for i, s in enumerate(parts):
if s.isdigit() or s not in \
['N', 'NE', 'NNE', 'ENE', 'E', 'ESE', 'SSE',
'SE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']:
parts[i] = s.title()
station['name'] = ' '.join(parts)
station['source'] = 'NOAA'
station['latitude'] = pd.unique(group.lat)[0]
station['longitude'] = pd.unique(group.lon)[0]
stations.append(station)
pd.DataFrame(addresses).to_csv('data/addresses_snowfall.csv', index=False)
pd.DataFrame(stations).to_csv('data/stations_snowfall.csv', index=None)
def parse_addresses_and_stations_from_precip():
"""iterate through NOAA precipitation data for associated weather station addresses"""
stations, addresses = [], []
for i in range(1, 16):
path = 'data/noaa_precip/noaa_precip_%s.csv' % i
df = pd.read_csv(path)
for name, group in df.groupby(['STATION_NAME']):
station = dict()
# parse the station
station['name'] = re.sub(r'(WA|US)', '', name).strip().title()
station['station_id'] = re.sub(r':', '', | pd.unique(group.STATION) | pandas.unique |
"""
Machine learning examples with SciPy and scikit-learn.
"""
from pandas import Categorical, DataFrame, Series
from scipy.cluster.hierarchy import fcluster, linkage
from sklearn import linear_model
class Classify:
"""
Train, use, and re-use an automatic classifier.
Input training data, then call with new data to return a Series.
Constructor inputs:
clues DataFrame: Training data with numeric columns.
answers Iterable: Known classes. Must align with clues.
model optional str: Name of an sklearn.linear_model.
**kwargs will be passed to the chosen model.
Call inputs:
clues DataFrame: Same columns as training 'clues'.
probs bool: Return a DataFrame of class probabilties?
Note: Some models cannot return class probabilities.
"""
def __init__(self, clues, answers, model="LogisticRegressionCV", **kwargs):
answers = Categorical(answers)
clues = DataFrame(clues)
model = getattr(linear_model, str(model))
self.cats = answers.categories.tolist()
self.columns = clues.columns.tolist()
self.model = model(**kwargs).fit(clues, answers.codes)
def __call__(self, clues, probs=False):
cats, model = self.cats, self.model
clues = DataFrame(clues)
if probs:
data = DataFrame(model.predict_proba(clues), columns=cats)
data.index = clues.index
else:
data = Categorical.from_codes(model.predict(clues), categories=cats)
data = | Series(data, index=clues.index, name="class") | pandas.Series |
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from itertools import combinations
from numpy import ndarray
from pandas import DataFrame, melt
from driven.data_sets.normalization import or2min_and2max
from driven.stats import freedman_diaconis
from driven.utils import get_common_start
from driven.vizualization.plotting import plotting
class ExpressionProfile(object):
"""
Representation of an Expression profile. It can be RNA-Seq, Proteomics, TNSeq or any other
profile that links genes/proteins to a value (continuous or discrete).
It the storage of single or multiple conditions as well as p-values.
Attributes
----------
identifiers: list
The gene or protein ids
conditions: list
The conditions in the expression profile (time points, media conditions, etc...)
expression: numpy.ndarray
An 2 dimensional array (nxm) where n is the number of genes and m the number of conditions.
p_values: numpy.ndarray
The p-values between conditions.
"""
@classmethod
def from_csv(cls, file_path, sep=",", replicas=None):
"""
Reads and expression profile from a Comma Separated Values (CSV) file.
Parameters
----------
file_path: str
The path to load.
sep: str
Default is ","
replicas: int
number of replicas. It uses the median of the replicas.
Returns
-------
ExpressionProfile
An expression profile built from the file.
"""
data = DataFrame.from_csv(file_path, sep=sep)
if replicas:
columns = data.columns
data = DataFrame([data[columns[i:i+replicas]].median(axis=1) for i in
range(0, len(columns), replicas)]).transpose()
data.columns = [get_common_start(*columns[i:i+replicas].tolist()) for i in
range(0, len(columns), replicas)]
return cls.from_data_frame(data)
@classmethod
def from_data_frame(cls, data_frame):
"""
Reads and expression profile from a pandas.DataFrame.
Parameters
----------
data_frame: pandas.DataFrame
A DataFrame containing the genes as index and the columns as conditions.
For more information about p-values see @ExpressionProfile.p_value_columns
Returns
-------
ExpressionProfile
An expression profile built from the DataFrame.
"""
columns = list(data_frame.columns)
conditions = [c for c in columns if "p-value" not in c]
p_value_keys = [c for c in columns if "p-value" in c]
if len(p_value_keys) > 0:
p_values = data_frame[p_value_keys].values
else:
p_values = None
expression = data_frame[conditions].values
identifiers = list(data_frame.index)
return ExpressionProfile(identifiers, conditions, expression, p_values)
def __init__(self, identifiers, conditions, expression, p_values=None):
assert isinstance(identifiers, list)
assert isinstance(conditions, list)
assert isinstance(expression, ndarray)
assert expression.shape == (len(identifiers), len(conditions))
self.conditions = conditions
self._condition_index = dict((c, i) for i, c in enumerate(conditions))
self.identifiers = identifiers
self._gene_index = dict((g, i) for i, g in enumerate(identifiers))
self.expression = expression
self._p_values = p_values
def __getitem__(self, item):
if not isinstance(item, tuple):
raise AttributeError(
"Non supported slicing method. E.g. profile[1,2] or profile[\"id1\", \"condition_a\"]")
if isinstance(item[0], str):
i = self._gene_index[item[0]]
elif isinstance(item[0], (slice, int)):
i = item[0]
else:
raise AttributeError(
"Non supported slicing value. E.g. profile[1,2] or profile[\"id1\", \"condition_a\"]")
if isinstance(item[1], str):
j = self._condition_index[item[1]]
elif isinstance(item[1], (slice, int)):
j = item[1]
else:
raise AttributeError(
"Non supported slicing method. E.g. profile[1,2] or profile[\"id1\", \"condition_a\"]")
return self.expression[i, j]
def __eq__(self, other):
if not isinstance(other, ExpressionProfile):
return False
else:
if self._p_values is None and other.p_values is None:
return self.identifiers == other.identifiers and \
self.conditions == other.conditions and \
self._p_values == other._p_values and \
(self.expression == other.expression).all()
else:
return self.identifiers == other.identifiers and \
self.conditions == other.conditions and \
(self._p_values == other._p_values).all() and \
(self.expression == other.expression).all()
def _repr_html_(self):
return self.data_frame._repr_html_()
@property
def data_frame(self):
"""
Builds a pandas.DataFrame from the ExpressionProfile.
Returns
-------
pandas.DataFrame
A DataFrame
"""
if self._p_values is None:
return DataFrame(self.expression,
index=self.identifiers,
columns=self.conditions)
else:
            # the p-value columns are appended after the condition columns
            return DataFrame(self.expression, index=self.identifiers, columns=self.conditions).join(
                DataFrame(self.p_values, index=self.identifiers, columns=self.p_value_columns))
@property
def p_value_columns(self):
"""
Generates the p-value column names. The p-values are between conditions.
Returns
-------
list
A list with p-value column headers.
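            e.g. for conditions ["t0", "t1", "t2"]: ["t0 t1 p-value", "t0 t2 p-value", "t1 t2 p-value"]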
"""
return ["%s %s p-value" % c for c in combinations(self.conditions, 2)]
@property
def p_values(self):
return self._p_values
@p_values.setter
def p_values(self, p_values):
assert isinstance(p_values, (ndarray, type(None)))
if p_values is not None:
if p_values.shape[1] != len(self.p_value_columns):
raise ValueError("Argument p_values does not cover all conditions (expected %s)" % self.p_value_columns)
self._p_values = p_values
@p_values.deleter
def p_values(self):
self._p_values = None
def histogram(self, conditions=None, transform=None, filter=None, bins=None,
width=800, height=None, palette='Spectral'):
if conditions is None:
conditions = self.conditions
data = melt(self.data_frame[conditions], var_name='condition')
if filter:
data = data.query(filter)
if transform:
data['value'] = data['value'].apply(transform)
hist = plotting.histogram(data, values='value', groups='condition', bins=bins,
width=width, height=height, palette=palette,
title="Histogram of expression values", legend=True)
plotting.display(hist)
return hist
def scatter(self, condition1=None, condition2=None, transform=float, width=800, height=None, color="#AFDCEC"):
if len(self.conditions) <= 1:
raise AssertionError("Cannot build a scatter with only one condition")
if condition1 is None:
condition1 = self.conditions[0]
elif isinstance(condition1, int):
condition1 = self.conditions[condition1]
if condition2 is None:
condition2 = self.conditions[1]
elif isinstance(condition2, int):
condition2 = self.conditions[condition2]
if transform:
data = self.data_frame.applymap(transform)
else:
data = self.data_frame
data = data.reset_index()
scatter = plotting.scatter(data, x=condition1, y=condition2, width=width, height=height,
color=color, label='index',
title="Expression values %s vs. %s" % (condition1, condition2),
xaxis_label="Expression %s" % condition1,
yaxis_label="Expression %s" % condition2)
plotting.display(scatter)
return scatter
def heatmap(self, conditions=None, identifiers=None, transform=None, low="green", mid="yellow", high="blue",
width=800, height=None, id_map=None):
id_map = {} if id_map is None else id_map
identifiers = self.identifiers if identifiers is None else identifiers
conditions = self.conditions if conditions is None else conditions
data = self.data_frame[conditions]
data['y'] = [id_map.get(i, i) for i in identifiers]
data = | melt(data, id_vars=['y'], var_name='x') | pandas.melt |
import pandas as pd
import matplotlib.pyplot as plt
def plot_results_for_probability_changes():
df1 = pd.read_csv("base.csv")
df2 = pd.read_csv("base_pc_100_pm_80.csv")
df3 = pd.read_csv("base_pc_80_pm_5.csv")
    df_iterations = pd.DataFrame({
        "90% crossover, 40% mutation": df1["iterations"],
        "100% crossover, 80% mutation": df2["iterations"],
        "80% crossover, 5% mutation": df3["iterations"]
    })
    df_avg_fitness = pd.DataFrame({
        "90% crossover, 40% mutation": df1["average fitness"],
        "100% crossover, 80% mutation": df2["average fitness"],
        "80% crossover, 5% mutation": df3["average fitness"]
    })
df_iterations.boxplot()
plt.show()
df_avg_fitness.boxplot()
plt.show()
def plot_results_for_pop_size_changes():
df1 = pd.read_csv("base_pc_100_pm_80_pop_20.csv")
df2 = pd.read_csv("base_pc_100_pm_80_pop_50.csv")
df3 = pd.read_csv("base_pc_100_pm_80.csv")
df4 = pd.read_csv("base_pc_100_pm_80_pop_200.csv")
    df_iterations = pd.DataFrame({
        "20 individuals": df1["iterations"],
        "50 individuals": df2["iterations"],
        "100 individuals": df3["iterations"],
        "200 individuals": df4["iterations"]
    })
    df_avg_fitness = pd.DataFrame({
        "20 individuals": df1["average fitness"],
        "50 individuals": df2["average fitness"],
        "100 individuals": df3["average fitness"],
        "200 individuals": df4["average fitness"]
    })
df_iterations.boxplot()
plt.show()
df_avg_fitness.boxplot()
plt.show()
def plot_results_for_crossover_changes():
df1 = pd.read_csv("base_pc_100_pm_80_pop_200.csv")
df2 = | pd.read_csv("pmx_pc_100_pm_80_pop_200.csv") | pandas.read_csv |
import pandas as pd
from datetime import date
from pandas.core.indexes import category
import config as config
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler, MaxAbsScaler
from main_table import MainInsert
class AlgoInsert:
def __init__(self):
self.category = config.Config.CATEGORY
self.naver = config.Config.NAVER
self.kakao = config.Config.KAKAO
self.camp=config.Config.CAMP
self.weights=config.Config.WEIGHTS
self.main_cat=config.Config.MAIN_CAT
    # Preprocess the tag column
def make_tag(self, camp_df):
camping_data = camp_df[['place_id', 'content_id', 'place_name', 'addr', 'tag', 'animal_cmg']]
camping_data['tag'] = camping_data['tag'].fillna("")
        # Add the "#반려견" (pet-friendly) tag based on the pet-admission column
camping_data["tag"][camping_data["animal_cmg"] == "가능"] = camping_data[camping_data["animal_cmg"] == "가능"]["tag"] + "#반려견"
camping_data["tag"][camping_data["animal_cmg"] == "가능(소형견)"] = camping_data[camping_data["animal_cmg"] == "가능(소형견)"]["tag"] + "#반려견"
        # Remove the season keywords (spring, summer, autumn, winter) from the tags
camping_data['tag'] = [t[:] if type(t) == str else "" for t in camping_data['tag']]
for kw in ['#봄 ', '#여름 ', '#가을', '#가을 ', '#겨울', '봄 ', '여름 ', '가을 ', '겨울',]:
camping_data['tag'] = [t.replace(kw, "") if type(t) == str else "" for t in camping_data['tag']]
return camping_data
    # One-hot encoding of sub-categories (tags)
def subcat(self, camping_data):
camping_data["tag"] = camping_data["tag"].str.replace(" ", "")
subcat = camping_data["tag"].str.split("#").apply(pd.Series).loc[:, 1:]
sub_df = pd.get_dummies(subcat.stack()).reset_index().groupby("level_0").sum().drop("level_1", 1)
return sub_df
    # One-hot encoding of main categories
def maincat(self, sub_df):
        # Load the main-category lookup table
lookup = pd.DataFrame(columns=["sub_cat", "main_cat"], data=self.category)
lookup['main_cat'] = lookup['main_cat'].str.replace(" ","")
main_df = pd.DataFrame()
for i in range(len(sub_df)):
main_df = pd.concat([pd.DataFrame(sub_df.values[i] * lookup["main_cat"].T), main_df], 1)
main_df = main_df.T.reset_index(drop=True)
main_df = pd.get_dummies(main_df.stack()).reset_index().groupby("level_0").sum().drop("level_1", 1)
main_df = main_df.iloc[:,1:]
main_df.index = sub_df.index
return main_df
    # Concatenate the sub-category and main-category one-hot encodings
def make_algo_search(self, camp_df):
camping_data = self.make_tag(camp_df)
sub_df = self.subcat(camping_data)
main_df = self.maincat(sub_df)
last_df = pd.concat([sub_df, main_df], 1)
last_df[last_df > 1] = 1
last_df['index']= last_df.index
algo_search_df = | pd.merge(camping_data, last_df, how="left", left_on = 'place_id', right_on='index') | pandas.merge |
import pandas as pd
import numpy as np
def frequency_encoding(df,feature):
map_dict=df[feature].value_counts().to_dict()
df[feature]=df[feature].map(map_dict)
def target_guided_encoding(df,feature,target):
order=df.groupby([feature])[target].mean().sort_values().index
map_dic={k:i for i,k in enumerate(order,0)}
df[feature]=df[feature].map(map_dic)
def mean_encoding(df,feature,target):
map_dict=df.groupby([feature])[target].mean().to_dict()
df[feature]=df[feature].map(map_dict)
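# Minimal usage sketch (hypothetical data with a binary "target" column); each
# encoder mutates the passed DataFrame in place:
#
#   df = pd.DataFrame({"city": ["a", "b", "a", "c"], "target": [1, 0, 1, 0]})
#   frequency_encoding(df, "city")   # each city is replaced by its count
#   # or: mean_encoding(df, "city", "target") to replace each city by its mean target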
def probability_ratio_encoding(df,feature,target):
order=df.groupby([feature])[target].mean()
prob_df= | pd.DataFrame(order) | pandas.DataFrame |
from pathlib import Path
import pandas as pd
import typer
from jinja2 import Environment, FileSystemLoader
from reki.data_finder import find_local_file
from reki_data_tool.postprocess.grid.gfs.ne.config import OUTPUT_DIRECTORY
from reki_data_tool.postprocess.grid.gfs.util import get_random_start_time, get_random_forecast_time
app = typer.Typer()
@app.command("serial")
def create_serial_task(
output_script_path: Path = typer.Option(Path(OUTPUT_DIRECTORY, "03-serial", "gfs_ne_grib2_serial_case_1.sh"))
):
start_time = get_random_start_time()
start_time_label = start_time.strftime("%Y%m%d%H")
forecast_time = get_random_forecast_time()
forecast_time_label = f"{int(forecast_time / pd.Timedelta(hours=1)):03}"
print(start_time_label, forecast_time_label)
output_directory = OUTPUT_DIRECTORY
output_file_path = Path(
output_directory,
f'ne_{start_time_label}_{forecast_time_label}.grb2'
)
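    # e.g. a start time of 2021-07-01 00Z with a 24 h forecast yields "ne_2021070100_024.grb2"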
file_loader = FileSystemLoader(Path(__file__).parent)
env = Environment(loader=file_loader)
template = env.get_template("slurm_job.sh")
job_params = dict(
job_name=output_script_path.stem,
is_parallel=False,
partition="serial",
model_path="reki_data_tool.postprocess.grid.gfs.ne",
options=f"""serial \\
--start-time={start_time_label} \\
--forecast-time={forecast_time_label}h \\
--output-file-path={output_file_path}"""
)
task_script_content = template.render(**job_params)
with open(output_script_path, "w") as f:
f.write(task_script_content)
return output_script_path
@app.command("dask-v1")
def create_dask_v1_task(
output_script_path: Path = typer.Option(Path(OUTPUT_DIRECTORY, "11-dask-v1", "dask_v1_case_1.sh")),
work_directory: Path = typer.Option(Path(OUTPUT_DIRECTORY)),
start_time: str = typer.Option(None),
forecast_time: str = typer.Option(None),
nodes: int = 1,
ntasks_per_node: int = 32,
partition: str = "normal"
):
if start_time is None:
start_time = get_random_start_time()
else:
start_time = | pd.to_datetime(start_time, format="%Y%m%d%H") | pandas.to_datetime |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any, Dict, List
import requests
from pandas import DataFrame, concat, isna
from lib.case_line import convert_cases_to_time_series
from lib.cast import safe_int_cast, numeric_code_as_string
from lib.pipeline import DataSource
from lib.time import datetime_isoformat
from lib.utils import table_rename
_IBGE_STATES = {
# Norte
"RO": 11,
"AC": 12,
"AM": 13,
"RR": 14,
"PA": 15,
"AP": 16,
"TO": 17,
# Nordeste
"MA": 21,
"PI": 22,
"CE": 23,
"RN": 24,
"PB": 25,
"PE": 26,
"AL": 27,
"SE": 28,
"BA": 29,
# Sudeste
"MG": 31,
"ES": 32,
"RJ": 33,
"SP": 35,
# Sul
"PR": 41,
"SC": 42,
"RS": 43,
# Centro-Oeste
"MS": 50,
"MT": 51,
"GO": 52,
"DF": 53,
}
class BrazilMunicipalitiesDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
# Get the URL from a fake browser request
url = requests.get(
"https://xx9p7hp1p7.execute-api.us-east-1.amazonaws.com/prod/PortalGeral",
headers={
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en-GB,en;q=0.5",
"X-Parse-Application-Id": "unAFkcaNDeXajurGB7LChj8SgQYS2ptm",
"Origin": "https://covid.saude.gov.br",
"Connection": "keep-alive",
"Referer": "https://covid.saude.gov.br/",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "Trailers",
},
).json()["results"][0]["arquivo"]["url"]
# Pass the actual URL down to fetch it
return super().fetch(output_folder, cache, [{"url": url}])
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = table_rename(
dataframes[0],
{
"data": "date",
"estado": "subregion1_code",
"codmun": "subregion2_code",
"municipio": "subregion2_name",
"casosNovos": "new_confirmed",
"obitosNovos": "new_deceased",
"casosAcumulado": "total_confirmed",
"obitosAcumulado": "total_deceased",
"Recuperadosnovos": "total_recovered",
},
drop=True,
)
# Convert date to ISO format
data["date"] = data["date"].astype(str)
# Parse region codes as strings
data["subregion2_code"] = data["subregion2_code"].apply(
lambda x: numeric_code_as_string(x, 6)
)
# Country-level data has null state
data["key"] = None
country_mask = data["subregion1_code"].isna()
data.loc[country_mask, "key"] = "BR"
# State-level data has null municipality
state_mask = data["subregion2_code"].isna()
data.loc[~country_mask & state_mask, "key"] = "BR_" + data["subregion1_code"]
# We can derive the key from subregion1 + subregion2
data.loc[~country_mask & ~state_mask, "key"] = (
"BR_" + data["subregion1_code"] + "_" + data["subregion2_code"]
)
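        # e.g. a country-level row gets key "BR", a state-level row "BR_SP", and a
        # municipality-level row "BR_SP_355030" (hypothetical 6-digit IBGE code)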
# Drop bogus data
data = data[data["subregion2_code"].str.slice(-4) != "0000"]
return data
_column_adapter = {
"sexo": "sex",
"idade": "age",
"municipioIBGE": "subregion2_code",
"dataTeste": "date_new_tested",
"dataInicioSintomas": "_date_onset",
"estadoIBGE": "_state_code",
"evolucaoCaso": "_prognosis",
"dataEncerramento": "_date_update",
"resultadoTeste": "_test_result",
"classificacaoFinal": "_classification",
}
class BrazilStratifiedDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
# The source URL is a template which we must format for the requested state
parse_opts = self.config["parse"]
fetch_opts = [
{**opts, "url": opts["url"].format(parse_opts["subregion1_code"].lower())}
for opts in fetch_opts
]
return super().fetch(output_folder, cache, fetch_opts)
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
# Manipulate the parse options here because we have access to the columns adapter
parse_opts = {**parse_opts, "error_bad_lines": False, "usecols": _column_adapter.keys()}
return super().parse(sources, aux, **parse_opts)
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
cases = table_rename(dataframes[0], _column_adapter, drop=True)
# Keep only cases for a single state
subregion1_code = parse_opts["subregion1_code"]
cases = cases[cases["_state_code"].apply(safe_int_cast) == _IBGE_STATES[subregion1_code]]
# Confirmed cases are only those with a confirmed positive test result
cases["date_new_confirmed"] = None
confirmed_mask = cases["_test_result"] == "Positivo"
cases.loc[confirmed_mask, "date_new_confirmed"] = cases.loc[
confirmed_mask, "date_new_tested"
]
# Deceased cases have a specific label and the date is the "closing" date
cases["date_new_deceased"] = None
deceased_mask = cases["_prognosis"] == "Óbito"
        cases.loc[deceased_mask, "date_new_deceased"] = cases.loc[deceased_mask, "_date_update"]
# Recovered cases have a specific label and the date is the "closing" date
cases["date_new_recovered"] = None
        recovered_mask = cases["_prognosis"] == "Cura"  # prognosis labels are in Portuguese ("Cura" = cured)
        cases.loc[recovered_mask, "date_new_recovered"] = cases.loc[recovered_mask, "_date_update"]
# Drop columns which we have no use for
cases = cases[[col for col in cases.columns if not col.startswith("_")]]
# Subregion code comes from the parsing parameters
cases["subregion1_code"] = subregion1_code
# Make sure our region code is of type str
cases["subregion2_code"] = cases["subregion2_code"].apply(safe_int_cast)
# The last digit of the region code is actually not necessary
cases["subregion2_code"] = cases["subregion2_code"].apply(
lambda x: None if | isna(x) | pandas.isna |
# Correlation and regression analysis test in Python
import numpy as np
import pandas as pd
# Read the CSV file
hdr = ['V1','V2','V3','V4','V5','V6','V7','V8','V9']
df = | pd.read_csv('c:/java/phone-02.csv', header=None,names=hdr) | pandas.read_csv |
from caes import ICAES2
import pandas as pd
from joblib import Parallel, delayed, parallel_backend
import time
import os
from datetime import datetime
# =====================
# function to enable sensitivity analysis
# =====================
def sizing_and_sensitivity(wrkdir, xlsx_filename, sheet_name, capacity, duration, polytropic_index, float_perm,
int_perm, debug):
# create folder to store results
result_dir = os.path.join(wrkdir, sheet_name)
try:
os.stat(result_dir)
except:
os.mkdir(result_dir)
# -----------------------------
# prepare for sizing
# -----------------------------
entries = ['depth_m', 'thickness_m', 'porosity', 'capacity_MW', 'duration_hr', 'permeability_mD', 'n_cmp1',
'n_exp1']
user_input = pd.read_excel(xlsx_filename, sheet_name=sheet_name)
user_input = user_input.set_index('Variable')
s = pd.Series(index=entries)
s['sheet_name'] = sheet_name
s['depth_m'] = user_input.loc['depth', 'Baseline']
s['thickness_m'] = user_input.loc['h', 'Baseline']
s['porosity'] = user_input.loc['phi', 'Baseline']
s['capacity_MW'] = capacity
s['duration_hr'] = duration
s['permeability_mD'] = user_input.loc['k', 'Baseline']
s['r_w'] = user_input.loc['r_w', 'Baseline']
s['n_cmp1'] = polytropic_index
s['n_exp1'] = polytropic_index
# ------------------
# run sizing
# ------------------
sized_result = sizing(s, debug=False)
# save inputs
os.chdir(result_dir)
sized_result.to_csv('sizing_results.csv')
# ------------------
# prepare for sensitivity
# ------------------
os.chdir(wrkdir)
user_input = pd.read_excel(xlsx_filename, sheet_name=sheet_name)
os.chdir(result_dir)
# use results from sizing
m_dot = | pd.Series() | pandas.Series |
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = | Series(["2010-01-04 00:00:00-05:00"]) | pandas.Series |
import re
import time
import requests
import pandas as pd
from bs4 import BeautifulSoup
class stackScrape(object):
def __init__(self):
pass
def extractDataFromUrl(self, url):
'''
Returns the scraped data from the target URL in raw format (HTML), which can be stackoverflow or stackexchange
        Parameters
----------
url: url of the website to be scraped (parameter passed by scrape_data function)
Returns
-------
a JSON with the raw format (HTML) of all the page (BeautifulSoup object)
'''
# server request
response = requests.get(url)
# Read the request, as a HTML text
html = response.text
# String -> Soup (special data structure of information)
soup = BeautifulSoup(html, 'html.parser')
# Extracts the desired information from the website and passes it on to json
data = stackScrape.parseTaggedPage(self, soup)
return data
def multiplyViews(self, text):
'''
checks for the existence of an order of magnitude (e.g. 10k), converts this string to a value by multiplying\n by the respective order of magnitude value
        Parameters
----------
text: given to be treated, parameter passed by the clean_scraped_data function
Returns
-------
returns an integer
'''
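        # e.g. "10k" -> 10000, "1m" -> 1000000, "523" -> 523 (no magnitude suffix)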
# multiplier ratio by order of magnitude
pattern = {
'k': 1000,
'm': 1000000
}
# multiplication of the value by the order of magnitude
try:
mult = int(re.search('(\d+)', text).group(1)) * pattern[re.search('([A-Za-z]+)', text).group(1)]
# exception for smaller data which is the smallest order of magnitude
except AttributeError:
mult = int(re.search('(\d+)', text).group(1))
return mult
def cleanScrapedData(self, text , keyname=None):
'''
performs data transformations as trim of the data if necessary and calls another function to handle values
        Parameters
----------
text: given to be treated, parameter passed by the parse_tagged_page function
keyname: refers to which encapsulation function should be performed by means of a key-value
- default=None
Returns
-------
returns the data with the proper transformation
'''
# encapsulated treatments by means of a key
transforms = {
'votes': text.replace('\nvote', '').strip('s'),
'answer': text.replace('answer', '').strip('s'),
'views': text.replace('views', '')
}
# application of the treatment
if keyname in transforms.keys():
# application of excision treatment
if keyname == 'views':
# transformation of the data into an integer and multiplies it by the order of magnitude
trasnf = stackScrape.multiplyViews(self, transforms[keyname])
return trasnf
return transforms[keyname]
# data that do not need treatment
else:
return text
def getId(self, q):
'''
returns the clean question id on page
        Parameters
----------
q: cleaning target, this parameter is passed by parse_tagged_page function
Returns
-------
string with the clean question id
'''
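        # e.g. q = '<a href="/questions/12345/question-title">' yields "12345"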
# cleaning pattern
pattern = 'href="/questions/([^"]+)[\/]'
# applies the established pattern to extract the id
q_id = re.search(pattern,q).group(1)
return q_id
def parseTaggedPage(self, soup):
'''
Returns the scraped data from the target URL, which can be stackoverflow or stackexchange
        Parameters
----------
soup: a JSON with the raw format (HTML) of all the page (BeautifulSoup object), this parameter is passed by extract_data_from_url function
Returns
-------
a JSON with the 'Question', 'Number of Votes', 'question-related tags', 'number of responses' and 'number of views' data\n\t\t from the records of one page
'''
# css target class
questionSummaries = soup.select('.question-summary')
# list of names to be assigned to data
keyNames = ['question', 'votes', 'tags', 'answer', 'views']
# css target class subclasses
classesNeeded = ['.question-hyperlink', '.vote', '.tags', '.status', '.views']
datas = []
# loop in each data extracted through the target css class
for q_el in questionSummaries:
questionData = {}
# loop in each enumerate subclass listed above
for i, _class in enumerate(classesNeeded):
# obtain the value of each subclass
sub_el = q_el.select(_class)[0]
# obtains the value of each subclass to generate the dictionary key
keyname = keyNames[i]
# attach the given treaty with its respective key to the dictionary
questionData[keyname] = stackScrape.cleanScrapedData(self, sub_el.text.strip(), keyname=keyname)
# subclass that receives a treatment of exception to the others
if _class == '.question-hyperlink':
# get the id of each question
questionData['id'] = stackScrape.getId(self, str(sub_el))
# attach the complete dictionary of each question to the empty list created
datas.append(questionData)
return datas
def scrapeData(self, base_url, tag, query_filter, max_pages, pagesize):
'''
        Iterates over all selected pages, extracting the data from each page and gathering it into a single result
        Parameters
----------
base_url: url path to all question filter by a tag
- stackexchange: https://stats.stackexchange.com/questions/tagged/
- stackoverflow: https://stackoverflow.com/questions/tagged/
tag: tag to be filtered (e.g.: 'python', 'r', 'javascript', ...)
query_filter: filter to perform a query ('Newest', 'Active', 'Bounties', 'Unanswered', 'Frequent', Votes')
max_pages: the maximum number of pages to be scraped
pagesize: the number of records per page (the maximum number is 50)
Returns
-------
a DataFrame with the 'Question', 'Number of Votes', 'question-related tags', 'number of responses' and 'number of views' data\n\t\t from the records of all selected pages
'''
datas = list()
# loop on each page of the given range
for p in range(max_pages):
# adjustment of the iteration variable
page_num = p + 1
# target page url formation
url = f'{base_url}{tag}?tab={query_filter}&page={page_num}&pagesize={pagesize}'
# url data extraction
datas += stackScrape.extractDataFromUrl(self, url)
# time control
time.sleep(0.5)
# DataFrame
dfStack = pd.DataFrame(datas, columns=datas[0].keys())
# DataFrame convert dtypes (int)
dfStack['votes'] = dfStack['votes'].astype(int)
dfStack['answer'] = dfStack['answer'].astype(int)
dfStack['views'] = dfStack['views'].astype(int)
return dfStack
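    # Usage sketch (requires network access; argument values follow the docstring above):
    #   scraper = stackScrape()
    #   df = scraper.scrapeData("https://stackoverflow.com/questions/tagged/",
    #                           "python", "Votes", max_pages=2, pagesize=50)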
def TagsStack(self, base_url, tag, query_filter, max_pages, pagesize):
'''
returns a dataframe with the top 15 tags ranked by the ratio of Views by Incidence\n\t\t
among the most incident tags in the issues recorded in stackoverflow or stackexchange.\n\t\t
The purpose of this dataframe is to use in other stages of the Project, such as, for example,\n\t\t
the youtube data scraping parameter
        Parameters
----------
base_url: url path to all question filter by a tag
- stackexchange: https://stats.stackexchange.com/questions/tagged/
- stackoverflow: https://stackoverflow.com/questions/tagged/
tag: tag to be filtered (e.g.: 'python', 'r', 'javascript', ...)
query_filter: filter to perform a query ('Newest', 'Active', 'Bounties', 'Unanswered', 'Frequent', Votes')
max_pages: the maximum number of pages to be scraped
pagesize: the number of records per page (the maximum number is 50)
Returns
-------
a DataFrame with the 'Tag', 'Incidence', 'Votes', 'Answer' and 'ViewsPerIncidence' data\n\t\t of the top 15 tags () ranked by the ratio of Views by Incidence\n\t\t
among the most incident tags in stackoverflow or stackexchange issues
'''
# Empty Dictinaries
bagOfWords = {}
bagOfWordsVotes = {}
bagOfWordsAnswers = {}
bagOfWordsViews = {}
# DataFrame with all the question that was scraped
df = stackScrape.scrapeData(self, base_url, tag, query_filter, max_pages, pagesize)
count = 0
# loop on each row of the DataFrame with the questions
for row in df['tags'].apply(lambda row: row.split()):
# loop on each tag of the set of tags in each row of the DataFrame
for i in row:
# Incidence of each tag
bagOfWords[i] = bagOfWords.get(i, 0) + 1
# sum total of votes per tag
bagOfWordsVotes[i] = bagOfWordsVotes.get(i, 0) + df.loc[count, 'votes']
# sum total of answers per tag
bagOfWordsAnswers[i] = bagOfWordsAnswers.get(i, 0) + df.loc[count, 'answer']
# sum total of views per tag
bagOfWordsViews[i] = bagOfWordsViews.get(i, 0) + df.loc[count, 'views']
count += 1
# Merge All the dictionaries into one DataFrame
DfTags = pd.Series(bagOfWords).to_frame().rename(columns={0:'Incidence'}).reset_index()
DfTags = pd.merge(DfTags, pd.Series(bagOfWordsVotes).to_frame().rename(columns={0:'Votes'}).reset_index(), how='left', left_on='index', right_on='index')
DfTags = pd.merge(DfTags, pd.Series(bagOfWordsAnswers).to_frame().rename(columns={0:'Answer'}).reset_index(), how='left', left_on='index', right_on='index')
DfTags = pd.merge(DfTags, | pd.Series(bagOfWordsViews) | pandas.Series |
import csv
import json
import multiprocessing
import os
import queue
import subprocess
import warnings
from datetime import datetime, timedelta
from glob import glob
from time import time
import joblib
import numpy as np
import pandas as pd
import psutil
# import wfdb
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from xgboost import XGBClassifier
from driver import load_challenge_data
from neurokit2_parallel import (
ECG_LEAD_NAMES,
KEYS_INTERVALRELATED,
KEYS_TSFRESH,
wfdb_record_to_feature_dataframe,
)
from util import parse_fc_parameters
from util.elapsed_timer import ElapsedTimer
from util.evaluate_12ECG_score import is_number, load_table
from util.evaluation_helper import evaluate_score_batch
from util.log import configure_logging
from util.raw_to_wfdb import convert_to_wfdb_record
def _get_fieldnames():
field_names = ["header_file", "age", "sex"]
for lead_name in ECG_LEAD_NAMES:
for key in KEYS_INTERVALRELATED:
field_names.append(f"{lead_name}_{key}")
for key in KEYS_TSFRESH:
hb_key = f"hb__{key}"
field_names.append(f"{lead_name}_{hb_key}")
for key in KEYS_TSFRESH:
sig_key = f"sig__{key}"
field_names.append(f"{lead_name}_{sig_key}")
return field_names
def feat_extract_process(
input_queue: multiprocessing.JoinableQueue,
output_queue: multiprocessing.JoinableQueue,
fc_parameters: [None, dict],
):
while True:
try:
header_file_path = input_queue.get(True, 1)
input_queue.task_done()
except queue.Empty:
# When the input queue is empty, worker process terminates
# NOTE: queue.Empty may be raised even in input_queue contains values
# parent process should respawn new workers in this edge case
break
# for some reason, OS FileError (Too many files) is raised...
# r = wfdb.rdrecord(header_file_path.rsplit(".hea")[0])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mat_fp = header_file_path.replace(".hea", ".mat")
data, header_data = load_challenge_data(mat_fp)
r = convert_to_wfdb_record(data, header_data)
record_features, dx = wfdb_record_to_feature_dataframe(
r, fc_parameters=fc_parameters
)
# turn dataframe record_features into dict flatten out the values (one key to one row)
ecg_features = dict(
(k, v[0]) for (k, v) in record_features.to_dict().items()
)
output_queue.put((header_file_path, ecg_features, dx))
def train_12ECG_classifier(
input_directory,
output_directory,
labels_fp="dxs.txt",
features_fp="features.csv",
weights_file="evaluation-2020/weights.csv",
early_stopping_rounds=20,
experiments_to_run=1, # 1 for challenge, 100 for paper
evaluation_size=0, # 0 for challenge, 0.15 for paper
limit_features_to=1000,
):
logger = configure_logging()
labels_fp = os.path.join(output_directory, labels_fp)
features_fp = os.path.join(output_directory, features_fp)
fieldnames = _get_fieldnames()
fc_parameters = None
# HARD CODE IN THE IMPORTANCES RANK!
importance_data = None
importances_fp = os.path.join("importances_rank.json")
if os.path.exists(importances_fp):
logger.info(f"Loading importances from '{importances_fp}'")
with open(importances_fp) as importancesfile:
importance_data = json.load(importancesfile)
# update the fieldnames to be the important features
logger.info(
f"Limiting classification to top {limit_features_to} important features!"
)
important_fields = importance_data["sorted_keys"][:limit_features_to]
fc_parameters = parse_fc_parameters(important_fields)
fieldnames = ["header_file",] + sorted(important_fields)
else:
logger.info(
"No importances_rank.json found, generating full feature set (VERY SLOW)."
)
logger.info(f"Loading feature extraction result from '{labels_fp}'...")
# check how many files have been processed already, allows feature extraction to be resumable
label_mapped_records = []
if os.path.isfile(labels_fp):
with open(labels_fp, mode="r", newline="\n") as labelfile:
for line in labelfile.readlines():
header_file_path, _ = json.loads(line)
label_mapped_records.append(header_file_path)
logger.info(f"Loaded {len(label_mapped_records)} from prior run.")
else:
logger.info("No labels file found.")
with open(labels_fp, mode="w"):
# initialize the file
pass
logger.info(f"Loading feature extraction result from '{features_fp}'...")
feature_mapped_records = []
if os.path.isfile(features_fp):
# get fieldnames of existing records
with open(features_fp, "r", newline="\n") as csvfile:
reader = csv.reader(csvfile)
fieldnames = next(reader)
with open(features_fp, "r", newline="\n") as csvfile:
reader = csv.DictReader(csvfile, fieldnames=fieldnames)
next(reader) # ignore header
with tqdm(reader) as t:
for row in t:
feature_mapped_records.append(row["header_file"])
else:
logger.info("No features file found.")
logger.info(f"Discovering ECG input files in '{input_directory}'...")
process_header_files = tuple(
hfp
for hfp in glob(os.path.join(input_directory, "**/*.hea"), recursive=True)
if hfp not in label_mapped_records or hfp not in feature_mapped_records
)
del label_mapped_records
del feature_mapped_records
logger.info(
"Number of ECG records remain to process: %d", len(process_header_files)
)
# Setup & populate input queue, then initialize output queue
input_queue = multiprocessing.JoinableQueue()
for header_file in process_header_files:
input_queue.put_nowait(header_file)
output_queue = multiprocessing.JoinableQueue()
# calculate CPUs used for feature extraction
num_cpus = len(os.sched_getaffinity(0))
logger.info("Number of available CPUs: %d", num_cpus)
total_ram_bytes = psutil.virtual_memory().total
total_ram_GiB = total_ram_bytes / (1024 ** 3)
ram_bottleneck_cpus = max(int(total_ram_GiB / 2.3), 1)
logger.info(f"Available virtual memory: {total_ram_GiB} GiB")
# quick test for GPUs used, allow no GPU classifier training
try:
num_gpus = str(subprocess.check_output(["nvidia-smi", "-L"])).count("UUID")
except Exception:
num_gpus = 0
logger.info(f"Detected {num_gpus} gpus.")
if ram_bottleneck_cpus < num_cpus:
logger.info(
f"Each proccess takes ~2.3 GiB, capping to {ram_bottleneck_cpus} processes"
)
num_cpus = ram_bottleneck_cpus
num_feature_extractor_procs = max(num_cpus, 1)
feature_extractor_procs = []
killed_extractor_procs = []
for _ in range(num_feature_extractor_procs):
p = multiprocessing.Process(
target=feat_extract_process, args=(input_queue, output_queue, fc_parameters)
)
p.start()
feature_extractor_procs.append(p)
# main process used for concatenating features
processed_files_counter = 0
out_start = datetime.now()
out_log = None
avg_records_per_sec = 0
# initialize the header if the file does not exist
if not os.path.isfile(features_fp):
with open(features_fp, "w", newline="\n") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
with open(features_fp, "a", newline="\n") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
with open(labels_fp, "a") as labelfile:
while True:
try:
header_file_path, f_dict, dxs = output_queue.get(True, 0.1)
labelfile.write(json.dumps((header_file_path, dxs)) + "\n")
labelfile.flush()
f_dict["header_file"] = header_file_path
writer.writerow(f_dict)
output_queue.task_done()
processed_files_counter += 1
except queue.Empty:
# When the output queue is empty and all workers are terminated
# all files have been processed
if input_queue.empty() and all(
not p.is_alive() for p in feature_extractor_procs
):
# input queue is empty and all children processes have exited
break
elif not input_queue.empty():
# input queue is not empty, restart stopped workers
num_feature_extractor_procs = len(feature_extractor_procs)
for fe_proc_idx in range(num_feature_extractor_procs):
p = feature_extractor_procs[fe_proc_idx]
if p in killed_extractor_procs:
continue
if not p.is_alive():
disp_str = (
f"{p.pid} (exitcode: {p.exitcode}) is not alive "
f"while input queue contains {input_queue.qsize()} tasks! "
"Restarting..."
)
logger.info(disp_str)
p.join()
killed_extractor_procs.append(p)
p_new = multiprocessing.Process(
target=feat_extract_process,
args=(input_queue, output_queue),
)
p_new.start()
feature_extractor_procs.append(p_new)
finally:
out_cur = datetime.now()
if out_log is None or out_cur - out_log > timedelta(seconds=5):
start_delta = out_cur - out_start
remaining_time, avg_records_per_sec = _eta_calculate(
start_delta,
processed_files_counter,
len(process_header_files),
avg_records_per_sec,
)
logger.info(
f"Processed {processed_files_counter}/{len(process_header_files)} in {start_delta} (est {remaining_time} remain)"
)
out_log = out_cur
out_cur = datetime.now()
start_delta = out_cur - out_start
logger.info(
f"Finished processing {processed_files_counter}/{len(process_header_files)} in {start_delta}"
)
# Close the queues
input_queue.close()
input_queue.join_thread()
output_queue.close()
output_queue.join_thread()
# print(input_queue.qsize(), output_queue.qsize(), processed_files_counter)
# load the data
logger.info(f"Loading record label mapping from '{labels_fp}'")
mapped_records = {}
with open(labels_fp, mode="r", newline="\n") as labelfile:
for line in labelfile.readlines():
header_file_path, dxs = json.loads(line)
mapped_records[header_file_path] = dxs
logger.info(f"Loading features_df from '{features_fp}'")
features_df = pd.read_csv(
features_fp, header=0, names=fieldnames, index_col="header_file"
)
logger.info("Constructing labels array...")
labels = [mapped_records[row[0]] for row in features_df.itertuples()]
# logger.info("Dropping 'header_file' column from features_df")
# features_df.reset_index(drop=True, inplace=True) # is necessary?
# Load the SNOMED CT code mapping table
with open("data/snomed_ct_dx_map.json", "r") as f:
SNOMED_CODE_MAP = json.load(f)
logger.info("Loading scoring function weights")
rows, cols, all_weights = load_table(weights_file)
assert rows == cols, "rows and cols mismatch"
scored_codes = rows
for experiment_num in range(experiments_to_run):
with ElapsedTimer() as timer:
logger.info(f"Running experiment #{experiment_num}")
logger.info(
f"Splitting data into training and evaluation split ({evaluation_size})"
)
if evaluation_size > 0:
(
train_features,
eval_features,
train_labels,
eval_labels,
) = train_test_split(features_df, labels, test_size=evaluation_size)
else:
train_features = features_df
train_labels = labels
eval_features = | pd.DataFrame({}) | pandas.DataFrame |
import warnings
from copy import deepcopy
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from etna.core import StringEnumWithRepr
from etna.transforms.base import Transform
from etna.transforms.utils import match_target_quantiles
class TransformMode(StringEnumWithRepr):
"""Enum for different metric aggregation modes."""
macro = "macro"
per_segment = "per-segment"
class SklearnTransform(Transform):
"""Base class for different sklearn transforms."""
def __init__(
self,
in_column: Optional[Union[str, List[str]]],
out_column: Optional[str],
transformer: TransformerMixin,
inplace: bool = True,
mode: Union[TransformMode, str] = "per-segment",
):
"""
Init SklearnTransform.
Parameters
----------
in_column:
columns to be transformed, if None - all columns will be transformed.
transformer:
sklearn.base.TransformerMixin instance.
inplace:
            if True, the features are transformed in place; otherwise new columns are added.
out_column:
base for the names of generated columns, uses self.__repr__() if not given.
mode:
"macro" or "per-segment", way to transform features over segments.
If "macro", transforms features globally, gluing the corresponding ones for all segments.
If "per-segment", transforms features for each segment separately.
Raises
------
ValueError:
if incorrect mode given
"""
if inplace and (out_column is not None):
warnings.warn("Transformation will be applied inplace, out_column param will be ignored")
self.transformer = transformer
if isinstance(in_column, str):
in_column = [in_column]
self.in_column = in_column if in_column is None else sorted(in_column)
self.inplace = inplace
self.mode = TransformMode(mode)
self.out_column = out_column
self.out_columns: Optional[List[str]] = None
def _get_column_name(self, in_column: str) -> str:
if self.out_column is None:
new_transform = deepcopy(self)
new_transform.in_column = [in_column]
return f"{new_transform.__repr__()}"
else:
return f"{self.out_column}_{in_column}"
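    # e.g. with out_column="scaled" and in_column ["target"], the generated feature is
    # named "scaled_target"; with out_column=None the transform's repr() is used instead.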
def fit(self, df: pd.DataFrame) -> "SklearnTransform":
"""
Fit transformer with data from df.
Parameters
----------
df:
DataFrame to fit transformer.
Returns
-------
self
"""
segments = sorted(set(df.columns.get_level_values("segment")))
if self.in_column is None:
self.in_column = sorted(set(df.columns.get_level_values("feature")))
if self.inplace:
self.out_columns = self.in_column
else:
self.out_columns = [self._get_column_name(column) for column in self.in_column]
if self.mode == TransformMode.per_segment:
x = df.loc[:, (segments, self.in_column)].values
elif self.mode == TransformMode.macro:
x = self._reshape(df)
else:
raise ValueError(f"'{self.mode}' is not a valid TransformMode.")
self.transformer.fit(X=x)
return self
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Transform given data with fitted transformer.
Parameters
----------
df:
DataFrame to transform with transformer.
Returns
-------
transformed DataFrame.
"""
segments = sorted(set(df.columns.get_level_values("segment")))
if self.mode == TransformMode.per_segment:
x = df.loc[:, (segments, self.in_column)].values
transformed = self.transformer.transform(X=x)
elif self.mode == TransformMode.macro:
x = self._reshape(df)
transformed = self.transformer.transform(X=x)
transformed = self._inverse_reshape(df, transformed)
else:
raise ValueError(f"'{self.mode}' is not a valid TransformMode.")
if self.inplace:
df.loc[:, (segments, self.in_column)] = transformed
else:
transformed_features = pd.DataFrame(
transformed, columns=df.loc[:, (segments, self.in_column)].columns, index=df.index
)
transformed_features.columns = pd.MultiIndex.from_product([segments, self.out_columns])
df = pd.concat((df, transformed_features), axis=1)
df = df.sort_index(axis=1)
return df
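    # Usage sketch (hypothetical; StandardScaler would need to be imported by the caller):
    #   SklearnTransform(in_column="target", out_column=None,
    #                    transformer=StandardScaler(), inplace=True, mode="per-segment")
    # It operates on the etna wide format, where columns are (segment, feature) pairs.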
def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Apply inverse transformation to DataFrame.
Parameters
----------
df:
DataFrame to apply inverse transform.
Returns
-------
transformed DataFrame.
"""
segments = sorted(set(df.columns.get_level_values("segment")))
if self.in_column is None:
raise ValueError("Transform is not fitted yet.")
if "target" in self.in_column:
quantiles = match_target_quantiles(set(df.columns.get_level_values("feature")))
else:
quantiles = set()
if self.inplace:
quantiles_arrays: Dict[str, pd.DataFrame] = dict()
if self.mode == TransformMode.per_segment:
x = df.loc[:, (segments, self.in_column)].values
transformed = self.transformer.inverse_transform(X=x)
# quantiles inverse transformation
for quantile_column_nm in quantiles:
df_slice_copy = df.loc[:, (segments, self.in_column)].copy()
df_slice_copy.loc[:, (segments, "target")] = df.loc[:, (segments, quantile_column_nm)].values
df_slice_copy.loc[:, (segments, self.in_column)] = self.transformer.inverse_transform(
X=df_slice_copy
)
quantiles_arrays[quantile_column_nm] = df_slice_copy.loc[:, (segments, "target")].rename(
columns={"target": quantile_column_nm}
)
elif self.mode == TransformMode.macro:
x = self._reshape(df)
transformed = self.transformer.inverse_transform(X=x)
transformed = self._inverse_reshape(df, transformed)
# quantiles inverse transformation
for quantile_column_nm in quantiles:
df_slice_copy = df.loc[:, (segments, self.in_column)].copy()
df_slice_copy.loc[:, (segments, "target")] = df.loc[:, (segments, quantile_column_nm)].values
df_slice_copy_reshaped_array = self._reshape(df_slice_copy)
transformed_ = self.transformer.inverse_transform(X=df_slice_copy_reshaped_array)
df_slice_copy.loc[:, (segments, self.in_column)] = self._inverse_reshape(
df_slice_copy, transformed_
)
quantiles_arrays[quantile_column_nm] = df_slice_copy.loc[:, (segments, "target")].rename(
columns={"target": quantile_column_nm}
)
else:
raise ValueError(f"'{self.mode}' is not a valid TransformMode.")
df.loc[:, (segments, self.in_column)] = transformed
for quantile_column_nm in quantiles:
df.loc[:, (segments, quantile_column_nm)] = quantiles_arrays[quantile_column_nm].values
return df
def _reshape(self, df: pd.DataFrame) -> np.ndarray:
segments = sorted(set(df.columns.get_level_values("segment")))
x = df.loc[:, (segments, self.in_column)]
x = | pd.concat([x[segment] for segment in segments]) | pandas.concat |
"""Functions for transofrmation of films and books datasets.
Functions
---------
get_books_ratings - transform books dataset
get_films_ratings - transform films dataset
generate_datasets - generate films and books datasets
"""
from typing import Set
import pandas as pd
from pathlib import Path
from os import mkdir, path
BOOKS_LOCATION = 'raw_data/books.csv'
FILMS_LOCATIONS = ['raw_data/title.basics.tsv',
'raw_data/title.ratings.tsv']
BOOKS_COLS = {'original_title': 'title', 'ratings_count': 'num_votes'}
FILMS_COLS = {'originalTitle': 'title', 'startYear': 'year',
'averageRating': 'average_rating', 'numVotes': 'num_votes'}
def get_books_ratings(location: str) -> pd.DataFrame:
"""
Read data from books rating dataset, select
<NAME>' books and remove unnecessary data.
:param location: location of the dataset
:return: transformed data
>>> get_books_ratings(BOOKS_LOCATION)
title average_rating num_votes
0 The Time Machine 7.74 276076
1 The War of the Worlds 7.60 159752
2 The Invisible Man 7.24 84778
3 The Island of Dr. Moreau 7.44 60346
"""
dataframe = pd.read_csv(location, low_memory=False)
dataframe = dataframe.loc[(dataframe['authors'].str.contains(
'<NAME>')) & (~dataframe['language_code'].isnull())]
# transform rating from 0-5 to 0-10 system
dataframe['average_rating'] *= 2
# only keep columns with title, rating and ratings count
dataframe = dataframe.loc[:, ['original_title', 'average_rating',
'ratings_count']].reset_index(drop=True)
# rename columns
dataframe.rename(columns=BOOKS_COLS, inplace=True)
return dataframe
def get_films_ratings(location_1: str, location_2: str, books: Set[str]) -> pd.DataFrame:
"""
Read and transform data from two film datasets, only
selecting corresponding films for the given set of books.
:param location_1: location of film titles dataset
:param location_1: location of film ratings dataset
:param books: a set of books to select films to
:return: a dataframe with films ratings
>>> get_films_ratings(*FILMS_LOCATIONS, {'The Time Machine', 'The Island of Dr. Moreau',\
'The Invisible Man', 'The War of the Worlds'})
title year average_rating num_votes
0 The Invisible Man 1933 7.7 30172
1 The War of the Worlds 1953 7.1 32429
2 The Time Machine 1960 7.6 35786
3 The Island of Dr. Moreau 1977 5.9 5677
4 The Island of Dr. Moreau 1996 4.6 30894
5 The Time Machine 2002 6.0 117796
6 The Invisible Man 2020 7.1 152154
7 The Invisible Man 2017 3.3 168
"""
# read title basics data
df_basics = | pd.read_csv(location_1, sep='\t', low_memory=False) | pandas.read_csv |
import streamlit as st
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from MulticoreTSNE import MulticoreTSNE as TSNE
import umap
from sklearn.cluster import KMeans
SEED = 100
# @st.cache memoizes function results to speed up repeated Streamlit reruns
@st.cache
def standardize(df):
X = df.values
X_std = StandardScaler().fit_transform(X)
return pd.DataFrame(X_std, columns=df.columns)
@st.cache
def do_pca(df):
pca = PCA(n_components=3)
embedding = pca.fit_transform(df.values)
return pd.DataFrame(embedding)
@st.cache
def do_umap(df):
umap_ = umap.UMAP(n_components=3)
embedding = umap_.fit_transform(df.values)
return pd.DataFrame(embedding)
@st.cache
def do_tsne(df):
tsne = TSNE(n_components=3)
embedding = tsne.fit_transform(df.values)
return pd.DataFrame(embedding)
def reduce_dimension(df, method='PCA'):
if method == 'PCA':
df = do_pca(df)
elif method == 'UMAP':
df = do_umap(df)
elif method == 't-SNE':
df = do_tsne(df)
return df
# @st.cache
def do_kmeans(df, num_c):
kmeans = KMeans(n_clusters=num_c, random_state=SEED)
clusters = kmeans.fit(df.values)
return clusters.labels_
def clustering(df, method='k-means', num_c=4):
if method == 'k-means':
labels = do_kmeans(df, num_c)
return labels
def main():
st.title('csv visualizer')
link = '[use example csv](https://raw.githubusercontent.com/root4kaido/csv_visualize_app/main/heart.csv)'
st.sidebar.markdown(link, unsafe_allow_html=True)
uploaded_file = st.sidebar.file_uploader("csv file upload", type='csv')
if uploaded_file is not None:
df_raw = | pd.read_csv(uploaded_file) | pandas.read_csv |
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
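# For reference, this factory is used elsewhere in the module (outside this excerpt)
# to generate the standard GroupBy reductions, roughly like:
#   sum = _groupby_function('sum', 'add', np.sum)
#   prod = _groupby_function('prod', 'prod', np.prod)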
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
    freq : string / frequency object, defaults to None
        This will group by the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to set up the selection,
# as these are not passed directly but via the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so we need to raise ValueError if
# we don't have this method, to indicate to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
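# Minimal sketch of ``get_group``, assuming ``df`` is grouped by column 'A'
# which contains the value 1: ``df.groupby('A').get_group(1)`` returns the
# rows where A == 1; with multiple grouping keys a tuple must be supplied,
# e.g. ``get_group((1, 'x'))``.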
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
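# Hedged usage sketch of ``apply``: per the Notes above, ``func`` is evaluated
# twice on the first group, so it should be free of side effects. Assuming
# ``df`` is grouped by 'A':
#
#   >>> df.groupby('A').apply(lambda g: g.sum())       # aggregation-like
#   >>> df.groupby('A').apply(lambda g: g - g.mean())  # transform-like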
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
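# Sanity sketch for ``sem``: by the implementation above it should match
# ``grouped.std(ddof=1) / np.sqrt(grouped.count())`` for each group, e.g.
#
#   >>> g = df.groupby('A')['B']
#   >>> np.allclose(g.sem(), g.std() / np.sqrt(g.count()))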
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute open, high, low and close values within each group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will take the nth non-null row; dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
   A  B
1  1  4
2  5  6
>>> df.groupby('A').tail(1)
   A  B
1  1  4
2  5  6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
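# Worked sketch of ``_cumcount_array``: with group positions [0, 1, 3] and
# [2, 4] and the default ``arr = [0, 1, 2]``, ascending counting scatters
# [0, 1, 2] and [0, 1] back to those positions, giving [0, 1, 0, 2, 1];
# descending counting scatters the reversed prefixes instead, giving
# [2, 1, 1, 0, 0].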
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped through object in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# fall through to the slow path below, which will raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
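# Hedged sketch of the compression above: with two groupings whose labels are
# [0, 0, 1] and [0, 1, 1] (shape (2, 2)), the combined group_index is roughly
# label0 * 2 + label1 = [0, 1, 3]; compressing that yields comp_ids = [0, 1, 2]
# and obs_group_ids = [0, 1, 3], i.e. only observed key combinations are kept.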
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
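# Hedged note on the lookup above: for how='mean' on float64 values, get_func
# first tries an attribute named 'group_mean_float64' on _algos, then
# 'group_mean_object', and finally plain 'group_mean'; if none of these exist,
# a NotImplementedError is raised for that (how, dtype) combination.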
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
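# Worked sketch of ``generate_bins_generic``: with values = [1, 2, 3, 4, 5, 6]
# and binner = [0, 3, 6], closed='left' gives bins = [2, 5] (i.e. values[0:2],
# values[2:5], values[5:]), while closed='right' gives bins = [3, 6] because
# values equal to a right edge fall into that bin.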
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method when grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
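# Minimal sketch of the key forms handled above, assuming a DataFrame ``df``
# with a column 'A' and a DatetimeIndex: a column name (df.groupby('A')), an
# array-like of matching length, a function or mapping applied to the index
# (df.groupby(lambda ts: ts.month)), a Grouper (df.groupby(Grouper(freq='M'))),
# or an index level via the ``level=`` keyword.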
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply an aggregation function or functions to groups, most likely yielding
a Series, but in some cases a DataFrame, depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=False')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths; fall
# through to the outer else clause
return Series(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
return Series(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
concatenated.sort_index(inplace=True)
return concatenated
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
func : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# try to do a fast transform via merge if possible
try:
obj = self._obj_with_exclusions
if isinstance(func, compat.string_types):
result = getattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = getattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a grouped that doesn't preserve the index, remap index based on the grouper
# and broadcast it
if ((not isinstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
len(result.index) != len(obj.index)):
results = obj.values.copy()
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
return DataFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can merge the result in
# GH 7383
names = result.columns
result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return | DataFrame(output, index=obj.index, columns=columns) | pandas.core.frame.DataFrame |
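# The block above is pandas' internal DataFrameGroupBy aggregate/transform machinery.
# For reference, a minimal user-level sketch of the two code paths it implements
# (dict aggregation and a like-indexed transform), on toy data:
import pandas as pd
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b'],
                   'x': [1.0, 2.0, 3.0, 4.0],
                   'y': [10.0, 20.0, 30.0, 40.0]})
agg_result = df.groupby('key').agg({'x': 'mean', 'y': ['min', 'max']})
demeaned = df.groupby('key')[['x', 'y']].transform(lambda g: g - g.mean())
print(agg_result)
print(demeaned)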
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from pandas import ExcelFile
from pandas import ExcelWriter
from scipy import ndimage
from scipy.stats import randint as sp_randint
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn import datasets
from sklearn import metrics
from sklearn import pipeline
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
import matplotlib.pyplot as plt
import numpy as np
import openpyxl
import pandas as pd
import scipy
import xlsxwriter
import os
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from math import sqrt
print("All the libraries are loaded")
# In[27]:
FCPC = pd.read_excel(r"test_preds_FCPC.xlsx")
FCPCe = pd.read_excel(r'test_preds_FCPCe.xlsx')
C1DS = pd.read_excel(r'test_preds_C1DS.xlsx')
C2DF = pd.read_excel(r'test_preds_C2DF.xlsx')
MGC = pd.read_excel(r'test_preds_MGC.xlsx')
MWC = pd.read_excel(r'test_preds_MWC.xlsx')
ACTIVITY = pd.read_excel(r'test.xlsx')
ACTIVITY=ACTIVITY['ACTIVITY']
# In[28]:
Average_output=(FCPC['FCPC_out']+FCPCe['FCPCe_out']+C1DS['C1DS_out']+C2DF['C2DF_out']+MGC['MGC_out']+MWC['MWC_out'])/6
# In[29]:
print(type(Average_output))
# In[30]:
Average_output= | pd.DataFrame(Average_output) | pandas.DataFrame |
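# The script above forms a simple mean ensemble of six model outputs; the imported
# metrics (r2_score, MAE, RMSE) are presumably intended to score that average against
# ACTIVITY. A minimal sketch of that evaluation on stand-in arrays (names are assumptions):
import numpy as np
from math import sqrt
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
y_true = np.array([1.2, 0.8, 2.5, 1.9])   # stand-in for ACTIVITY
y_pred = np.array([1.0, 1.0, 2.3, 2.0])   # stand-in for Average_output
print("R2  :", r2_score(y_true, y_pred))
print("MAE :", mean_absolute_error(y_true, y_pred))
print("RMSE:", sqrt(mean_squared_error(y_true, y_pred)))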
import pandas as pd
import numpy as np
def get_rating_summary(df,num_users = None, num_items = None):
'''
print summary of user-item matrix
args:
df: data frame which contains userId & itemId columns
'''
if num_users == None:
num_users = len(df['userId'].unique())
if num_items == None:
num_items = len(df['itemId'].unique())
num_values = len(df)
sparsity = 1 - (num_values/(num_users * num_items))
print('# users: {0}, # items: {1}, # vals: {2}, sparsity: {3:.7f}'.
format(num_users, num_items, num_values, sparsity))
def get_tag_summary(df, num_users = None, num_items = None, tagcol = 'tagId'):
'''
print summary of user-item-tag matrix
args:
df: data frame which contains userId & itemId & tagId columns
'''
if num_users == None:
num_users = len(df['userId'].unique())
if num_items == None:
num_items = len(df['itemId'].unique())
num_tags = len(df[tagcol].unique())
tagnum_per_interaction = df.groupby(['userId','itemId'])[tagcol].apply(lambda x:len(set(x))).reset_index()[tagcol]
num_interaction = len(tagnum_per_interaction)
sparsity = 1 - (num_interaction/(num_users * num_items))
tagged_items_per_user = df.groupby('userId')['itemId'].apply(lambda x:len(set(x))).reset_index()['itemId']
tag_count = df.groupby(tagcol)['itemId'].apply(len).reset_index()['itemId']
print('# users: {0}, # items: {1}, # tags: {2}, #interaction: {3}, sparsity: {4:.7f}'.
format(num_users, num_items, num_tags, num_interaction, sparsity))
print("summary for the number of tags per interation")
print(tagnum_per_interaction.describe())
print("summary for the number of tagged items per users")
print(tagged_items_per_user.describe())
print("summary for the occurence per tag")
print(tag_count.describe())
def preprocess_ratings(ratings, min_rating):
if min_rating > 1:
ratings = ratings[ratings['rating'] >= min_rating]
return ratings[['userId','itemId']]
def preprocess_tags(tags, tag_user_threshold, tag_item_threshold):
'''
Normalize tags (lower-case, strip '.') and drop tags applied by fewer than the given number of users/items.
'''
tags = tags[['userId','itemId','tag']]
tags['tag'] = tags['tag'].apply(lambda x: x.lower().replace('.', ''))
#tags.loc[:,'tag'] = tt
if tag_item_threshold > 1:
#limit the vocabulary of tags to those that have been applied by at least "tag_item_threshold" items
counter = tags.groupby('tag')['itemId'].apply(lambda x: len(set(x))).to_frame('count').reset_index()
counter = counter[counter['count']>=tag_item_threshold]
tags = pd.merge(tags,counter,on='tag')[['userId','itemId','tag']]
if tag_user_threshold > 1:
#limit the vocabulary of tags to those that have been applied by at least "tag_user_threshold" users
counter = tags.groupby('tag')['userId'].apply(lambda x: len(set(x))).to_frame('count').reset_index()
counter = counter[counter['count']>=tag_user_threshold]
tags = pd.merge(tags,counter,on='tag')[['userId','itemId','tag']]
return tags
def set_tagId(tags):
'''
set uinque tag id for tags.
'''
tag_list = list(tags['tag'].unique())
tagId_list = list(range(len(tag_list)))
tag_tagId = pd.DataFrame({'tag':tag_list,'tagId':tagId_list})
tags = pd.merge(tags,tag_tagId, on='tag')[['userId','itemId','tagId']]
return tags, tag_tagId
def _update_id(ratings, tags):
old_itemId = ratings['itemId'].unique()
new_itemId = np.arange(len(old_itemId))
updated_itemId = pd.DataFrame({'itemId':old_itemId,'new_itemId':new_itemId})
old_userId = ratings['userId'].unique()
new_userId = np.arange(len(old_userId))
updated_userId = | pd.DataFrame({'userId':old_userId,'new_userId':new_userId}) | pandas.DataFrame |
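# The row above is cut off inside _update_id, which rebuilds contiguous user/item ids.
# A self-contained sketch of that remapping on toy data (the final merge step is an
# assumption, since the rest of the function is not shown):
import numpy as np
import pandas as pd
ratings = pd.DataFrame({'userId': [10, 10, 42], 'itemId': [7, 9, 7]})
item_map = pd.DataFrame({'itemId': ratings['itemId'].unique(),
                         'new_itemId': np.arange(ratings['itemId'].nunique())})
user_map = pd.DataFrame({'userId': ratings['userId'].unique(),
                         'new_userId': np.arange(ratings['userId'].nunique())})
remapped = (ratings.merge(item_map, on='itemId')
                   .merge(user_map, on='userId')[['new_userId', 'new_itemId']])
print(remapped)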
# For each 1-minute window from the training data, apply the ensemble model to get a score. Sort the scores and find the score s at a specific percentile p, e.g. if p=10, then 10% of scores are <= s.
# --- Imports ---
from sklearn.preprocessing import MinMaxScaler
import scipy.integrate as integrate
import pandas as pd
import numpy as np
import time
import os, sys
# add the parent directory to the path
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("ensemble/"))
sys.path.insert(0, os.path.abspath("ranking/"))
from common import *
from model import *
from ranking import *
from ensemble_for_ranking import *
from constants_model import *
WEIGHTED=True
FEATURE_IMP_DIR = "../data/results/feature_importance_coefficients/"
# --- Main ---
if __name__ == '__main__':
print("\nFeature cols:", FEATURE_COLS)
# get the model; we usually use previously trained models
feature_str = get_feature_str(FEATURE_COLS, feature_imp_str=None)
print("Features, feature str:", FEATURE_COLS, feature_str)
if not USE_SAVED_MODEL:
print("Please train the model first")
exit()
print("Model location: ", MODEL_DIR)
print("Train directory: ", TRAIN_DIR)
# dictionaries of scores and results for each file (day)
df_scores = dict()
dir_scores = SCORES_TRAINING_DIR
dir_scores = os.path.join(dir_scores, "weighted_ensemble")
os.makedirs(dir_scores, exist_ok=True)
print("Output dir for scores in training: ", dir_scores)
# get the train data
train_files = sorted(os.listdir(TRAIN_DIR))
train_files = [os.path.join(TRAIN_DIR, obj) for obj in train_files]
train_files = [f for f in train_files if os.path.isfile(f)]
print("Train files: ", train_files)
for port in PORTS:
print("\nPort:", port)
scores_combined = np.asarray([])
for crt_file in train_files:
print("Train file: ", crt_file)
scores_crt, _ = get_combined_scores_per_port(port, FEATURE_COLS, crt_file, model_dir=MODEL_DIR, feature_imp_dir=FEATURE_IMP_DIR, weighted=WEIGHTED, labeled=None, ranking=False, port_feat_imp=port)
print("scores_crt: ", scores_crt)
if len(scores_crt) == 0: continue
scores_combined = np.concatenate((scores_combined, scores_crt), axis=None)
percentiles = []
thresh = [0.0, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + list(range(1, 101))
for i in thresh:
percentile_i = np.percentile(scores_combined, i, axis=None)
percentiles.append(tuple([i, percentile_i]))
scores_topk = get_top_k_scores(scores_combined, len(scores_combined))
print("Len of scores: ", len(scores_combined))
COL_NAMES_RANKING = ["window", "score"]
COL_NAMES_PERCENTILES = ["percentile", "score"]
newf = "scores_training_p{}.obj".format(port)
newfcsv = "scores_training_p{}.csv".format(port)
percentiles_csv = "percentiles_training_p{}.csv".format(port)
newf = os.path.join(dir_scores, newf)
newfcsv = os.path.join(dir_scores, newfcsv)
percentiles_csv = os.path.join(dir_scores, percentiles_csv)
write_pickle(newf, scores_topk)
| pd.DataFrame(scores_topk, columns=COL_NAMES_RANKING) | pandas.DataFrame |
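# As the header comment says, the goal is to turn training-window scores into percentile
# thresholds. Isolated, that step is just the following (file name illustrative):
import numpy as np
import pandas as pd
scores = np.random.rand(1000)                 # stand-in for the combined training scores
levels = [0.1, 1, 5, 10, 50, 90, 99]
table = pd.DataFrame({'percentile': levels,
                      'score': [np.percentile(scores, p) for p in levels]})
table.to_csv('percentiles_training_example.csv', index=False)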
import pandas as pd
import numpy as np
from random import gauss, uniform
def get_makespan(curr_plan, num_resources, workflow_inaccur, positive=False, dynamic_res=False):
'''
Calculate the makespan of a plan. Returns the realised makespan, a purely reactive makespan (accumulated execution times per resource), and the planned/expected makespan; workflow sizes are perturbed by up to workflow_inaccur, and resource performance can optionally be made noisy (dynamic_res).
'''
under = False
reactive_resource_usage = [0] * num_resources
resource_usage = [0] * num_resources
expected = [0] * num_resources
tmp_idx = [0] * num_resources
for placement in curr_plan:
workflow = placement[0]
resource = placement[1]
resource_id = resource['id']
expected_finish = placement[3]
if dynamic_res:
perf = gauss(resource['performance'], resource['performance'] * 0.0644)
else:
perf = resource['performance']
if positive:
inaccur = uniform(0, workflow_inaccur)
else:
inaccur = uniform(-workflow_inaccur, workflow_inaccur)
exec_time = (workflow['num_oper'] * (1 + inaccur)) / perf
reactive_resource_usage[resource_id - 1] += exec_time
resource_usage[resource_id - 1] = max(resource_usage[resource_id - 1] + exec_time, expected_finish)
expected[resource_id - 1] = expected_finish
tmp_idx[resource_id - 1] += 1
return max(resource_usage), max(reactive_resource_usage), max(expected)
# ------------------------------------------------------------------------------
#
test_case = pd.read_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, size, 0, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/heft/DynHeteroResources_StHeteroCampaignsHEFT.csv', index=False)
test_case = | pd.read_csv('../Data/heft/DynHeteroResources_StHomoCampaignsHEFT.csv') | pandas.read_csv |
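# The CSVs driving this experiment are not included here; as a sanity check, the
# get_makespan() defined above can be exercised on a hand-built two-resource plan
# (the tuple layout is inferred from how the function indexes each placement):
toy_plan = [
    ({'num_oper': 1000}, {'id': 1, 'performance': 10.0}, 0, 100.0),
    ({'num_oper': 500},  {'id': 2, 'performance': 5.0},  0, 100.0),
]
makespan, reactive, expected = get_makespan(toy_plan, num_resources=2,
                                            workflow_inaccur=0.1, dynamic_res=True)
print(makespan, reactive, expected)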
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import shutil
import requests
import numpy.testing as npt
import pytest
import skimage.io as skio
from .. import argus_shapes as shapes
import pulse2percept.implants as p2pi
try:
FileNotFoundError
except NameError:
# Python 2
FileNotFoundError = IOError
def generate_dummy_data():
X = pd.DataFrame()
X['subject'] = pd.Series(['S1', 'S1', 'S2', 'S2', 'S3', 'S3'])
X['feature1'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
X['feature2'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
y = pd.DataFrame()
y['subject'] = pd.Series(['S1', 'S1', 'S2', 'S2', 'S3', 'S3'],
index=X.index)
y['target'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
index=X.index)
y['image'] = pd.Series([np.random.rand(10, 10)] * 6)
y['area'] = pd.Series([1, 2, 3, 4, 5, 6])
return X, y
def test_download_file():
fname = "test.zip"
with pytest.raises(requests.exceptions.HTTPError):
shapes.download_file("https://github.com/VisCog/blah", fname)
shapes.download_file("https://osf.io/rduj4", fname)
os.remove(fname)
def test_fetch_data():
test_dir = "test"
with pytest.raises(ValueError):
shapes.fetch_data()
shapes.fetch_data(save_path=test_dir)
npt.assert_equal(
os.path.exists(os.path.join(test_dir, 'argus_shapes.zip')),
True
)
npt.assert_equal(os.path.isdir(os.path.join(test_dir, 'argus_shapes')),
True)
npt.assert_equal(
os.path.exists(os.path.join(test_dir, 'argus_shapes',
'drawings_single.csv')),
True
)
npt.assert_equal(
os.path.exists(os.path.join(test_dir, 'argus_shapes', 'subjects.csv')),
True
)
shutil.rmtree(test_dir)
def test_load_data():
with pytest.raises(FileNotFoundError):
shapes.load_data("doesforsurenotexist.csv", auto_fetch=False)
csvfile = "data.csv"
csvfile2 = "data2.csv"
imgfile = "test_image.png"
skio.imsave(imgfile, np.random.randint(256, size=(10, 10)))
subjects = ['S1', 'S2']
electrodes = ['A1', 'F9']
amps = [2.0, 3.0]
for use_fullpath in [True, False]:
data = []
for subject in subjects:
for electrode in electrodes:
for amp in amps:
if use_fullpath:
fname = os.path.join(os.getcwd(), imgfile)
else:
fname = imgfile
row = {
'subject_id': subject,
'PTS_ELECTRODE': electrode,
'PTS_FILE': fname,
'PTS_AMP': amp,
'PTS_FREQ': 20.0,
'PTS_PULSE_DUR': 0.45,
'stim_class': 'SingleElectrode',
'date': '1985/09/30'
}
data.append(row)
| pd.DataFrame(data) | pandas.DataFrame |
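# The test above is cut off right where the accumulated row dicts become a DataFrame;
# the usual continuation (an assumption, since the remainder of the test is not shown)
# would simply be:
#
#   df = pd.DataFrame(data)
#   df.to_csv(csvfile, index=False)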
import sys
import pandas as pd
import numpy as np
from scipy import stats
from itertools import compress
import statsmodels.stats.multitest as smt
import scikits.bootstrap as bootstrap
from sklearn.decomposition import PCA
from .scaler import scaler
from .imputeData import imputeData
class statistics:
usage = """Generate a table of parametric or non-parametric statistics and merges them with the Peak Table (node table).
Initial_Parameters
----------
peaktable : Pandas dataframe containing peak data. Must contain 'Name' and 'Label'.
datatable : Pandas dataframe matrix containing values for statistical analysis
Methods
-------
set_params : Set parameters -
parametric: Perform parametric statistical analysis, assuming the data is normally distributed (default: True)
log_data: Apply a log transform ('natural', base 2 or base 10) to all data prior to statistical analysis (default: (False, 2))
scale_data: Scale the data ('standard' (centers to the mean and scales to unit variance), 'minmax' (scales between 0 and 1), 'maxabs' (scales to the absolute maximum value), 'robust' (centers to the median and scales to between 25th and 75th quantile range) (default: (True, 'standard'))
impute_data: Impute any missing values using KNN impute with a set number of nearest neighbours (default: (False, 3))
group_column_name: The group column name used in the datatable (default: None)
control_group_name: The control group name in the datatable, if available (default: None)
group_alpha_CI: The alpha value for group confidence intervals (default: 0.05)
fold_change_alpha_CI: The alpha value for mean/median fold change confidence intervals (default: 0.05)
pca_alpha_CI: The alpha value for the PCA confidence intervals (default: 0.05)
total_missing: Calculate the total missing values per feature (Default: False)
group_missing: Calculate the missing values per feature per group (if group_column_name not None) (Default: False)
pca_loadings: Calculate PC1 and PC2 loadings for each feature (Default: True)
normality_test: Determine normal distribution across whole dataset using Shapiro-Wilk test (pvalues < 0.05 ~ non-normal distribution) (default: True)
group_normality_test: Determine normal distribution across each group (if group_column_name not None) using Shapiro-Wilk test (pvalues < 0.05 ~ non-normal distribution) (default: True)
group_mean_CI: Determine the mean with bootstrapped CI across each group (if parametric = True and group_column_name not None) (default: True)
group_median_CI: Determine the median with bootstrapped CI across each group (if parametric = False and group_column_name not None) (default: True)
mean_fold_change: Calculate the mean fold change with bootstrapped confidence intervals (if parametric = True, group_column_name not None and control_group_name not None) (default: False)
median_fold_change: Calculate the median fold change with bootstrapped confidence intervals (if parametric = False, group_column_name not None and control_group_name not None) (default: False)
levene_twoGroup: Test null hypothesis that control group and each of the other groups come from populations with equal variances (if group_column_name not None and control_group_name not None) (default: False)
levene_allGroup: Test null hypothesis that all groups come from populations with equal variances (if group_column_name not None) (default: False)
oneway_Anova_test: Test null hypothesis that all groups have the same population mean, with included Benjamini-Hochberg FDR (if parametric = True and group_column_name not None) (default: False)
kruskal_wallis_test: Test null hypothesis that population median of all groups are equal, with included Benjamini-Hochberg FDR (if parametric = False and group_column_name not None) (default: False)
ttest_oneGroup: Calculate the T-test for the mean across all the data (one group), with included Benjamini-Hochberg FDR (if parametric = True, group_column_name is None or there is only 1 group in the data) (default: False)
ttest_twoGroup: Calculate the T-test for the mean of two groups, with one group being the control group, with included Benjamini-Hochberg FDR (if parametric = True, group_column_name not None and control_group_name not None) (default: False)
mann_whitney_u_test: Compute the Mann-Whitney U test to determine differences in distribution between two groups, with one being the control group, with included Benjamini-Hochberg FDR (if parametric = False, group_column_name not None and control_group_name not None) (default: False)
help : Print this help text
calculate : Performs the statistical calculations and outputs the Peak Table (node table) with the results appended.
"""
def __init__(self, peaktable, datatable):
peaktable = self.__checkPeakTable(self.__checkData(peaktable))
datatable = self.__checkData(datatable)
#Slice the meta-data, and select only peaks from the peaktable for processing, and add the meta-data back
meta = datatable.T[~datatable.T.index.isin(peaktable['Name'])].T.reset_index(drop=True)
dat = datatable[peaktable['Name']].reset_index()
datatable = pd.concat([meta, dat], axis=1).set_index(['index'])
datatable.index.name = None
self.__peaktable = peaktable
self.__datatable = datatable
self.set_params()
def help(self):
print(statistics.usage)
def set_params(self, parametric=True, log_data=(False,2), scale_data=(False, 'standard'), impute_data=(False, 3), group_column_name=None, control_group_name=None, group_alpha_CI=0.05, fold_change_alpha_CI=0.05, pca_alpha_CI=0.05, total_missing=False, group_missing=False, pca_loadings=True, normality_test=True, group_normality_test=True, group_mean_CI=True, group_median_CI=True, mean_fold_change=False, median_fold_change=False, kruskal_wallis_test=False, levene_twoGroup=False, levene_allGroup=False, oneway_Anova_test=False, ttest_oneGroup=False, ttest_twoGroup=False, mann_whitney_u_test=False):
parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test = self.__paramCheck(parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test)
self.__parametric = parametric;
self.__log_data = log_data;
self.__scale_data = scale_data;
self.__impute_data = impute_data;
self.__group_column_name = group_column_name;
self.__control_group_name = control_group_name;
self.__group_alpha_CI = group_alpha_CI;
self.__fold_change_alpha_CI = fold_change_alpha_CI;
self.__pca_alpha_CI = pca_alpha_CI;
self.__total_missing = total_missing;
self.__group_missing = group_missing;
self.__pca_loadings = pca_loadings;
self.__normality_test = normality_test;
self.__group_normality_test = group_normality_test;
self.__group_mean_CI = group_mean_CI;
self.__group_median_CI = group_median_CI;
self.__mean_fold_change = mean_fold_change;
self.__median_fold_change = median_fold_change;
self.__oneway_Anova_test = oneway_Anova_test;
self.__kruskal_wallis_test = kruskal_wallis_test;
self.__levene_twoGroup = levene_twoGroup;
self.__levene_allGroup = levene_allGroup;
self.__ttest_oneGroup = ttest_oneGroup;
self.__ttest_twoGroup = ttest_twoGroup;
self.__mann_whitney_u_test = mann_whitney_u_test;
def calculate(self):
peaktable = self.__peaktable
datatable = self.__datatable
parametric = self.__parametric
log_data = self.__log_data
scale_data = self.__scale_data
impute_data = self.__impute_data
group_column_name = self.__group_column_name
control_group_name = self.__control_group_name
group_alpha_CI = self.__group_alpha_CI
fold_change_alpha_CI = self.__fold_change_alpha_CI
pca_alpha_CI = self.__pca_alpha_CI
total_missing = self.__total_missing
group_missing = self.__group_missing
pca_loadings = self.__pca_loadings
normality_test = self.__normality_test
group_normality_test = self.__group_normality_test
group_mean_CI = self.__group_mean_CI
group_median_CI = self.__group_median_CI
mean_fold_change = self.__mean_fold_change
median_fold_change = self.__median_fold_change
kruskal_wallis_test = self.__kruskal_wallis_test
levene_twoGroup = self.__levene_twoGroup
levene_allGroup = self.__levene_allGroup
oneway_Anova_test = self.__oneway_Anova_test
ttest_oneGroup = self.__ttest_oneGroup
ttest_twoGroup = self.__ttest_twoGroup
mann_whitney_u_test = self.__mann_whitney_u_test
peakNames = list(peaktable['Name'].values)
meta = datatable.T[~datatable.T.index.isin(peakNames)].T.reset_index(drop=True)
peakData = datatable[peakNames].reset_index(drop=True)
(log_bool, log_base) = log_data;
if log_bool:
if isinstance(log_base, str) and log_base.lower() == 'natural':
peakData = peakData.applymap(np.log)
elif log_base == 2:
peakData = peakData.applymap(np.log2)
elif log_base == 10:
peakData = peakData.applymap(np.log10)
else:
print("Error: The chosen log type is invalid.")
sys.exit()
(scale_bool, scale_type) = scale_data
if scale_bool:
if isinstance(scale_type, str) and scale_type.lower() == 'standard':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
elif isinstance(scale_type, str) and scale_type.lower() == 'minmax':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
elif isinstance(scale_type, str) and scale_type.lower() == 'maxabs':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
elif isinstance(scale_type, str) and scale_type.lower() == 'robust':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
else:
print("Error: The chosen scale type is invalid.")
sys.exit()
(impute_bool, k) = impute_data;
if impute_bool:
peakData = imputeData(peakData, k=k).reset_index(drop=True)
if not isinstance(peakData, pd.DataFrame):
peakData = pd.DataFrame(peakData, columns=list(peakNames)).reset_index(drop=True)
#Add the meta data back in with the logged, scaled, or imputed data
datatable = pd.concat([meta, peakData], axis=1).reset_index(drop=True)
statsData = pd.DataFrame()
if group_column_name is not None:
groups = np.unique(datatable[group_column_name].values)
groupData = []
# Append each group to a list
for group in groups:
groupData.append(datatable.loc[datatable[group_column_name] == group])
#Iterate over each peak/feature and calculate statistics
for peakName in peakNames:
statsDataDict = {}
groupDict = {}
df_totalGrpMissing = pd.DataFrame()
totalGrpMissingTitles = []
df_meanFold = pd.DataFrame()
df_medianFold = pd.DataFrame()
df_mannWhitney = pd.DataFrame()
df_ttest = pd.DataFrame()
df_levene_twoGroup = pd.DataFrame()
df_groupNormality = pd.DataFrame()
df_grpMeanCI = pd.DataFrame()
df_grpMedianCI = pd.DataFrame()
mannWhitneyTitles = []
ttestTitles = []
leveneTwoGroupTitles = []
mannwhitney_pvalue_name = ''
mannwhitney_statistic_name = ''
# for each group populate a group dictionary
if group_column_name is not None:
for grpIdx, group in enumerate(groupData):
# Calculate values missing within each group
if group_missing:
df_totalGrpMissing = self.__GroupMissing_Calc(group, groups, grpIdx, peakName, totalGrpMissingTitles, df_totalGrpMissing)
statsDataDict['GroupMissingValues'] = df_totalGrpMissing
x = group[[peakName]].values
groupDict[groups[grpIdx]] = x[~np.isnan(x)]
if control_group_name is not None:
controlGroup = groupDict[control_group_name];
if group_column_name is not None:
for key, group in groupDict.items():
if group_normality_test:
df_groupNormality = self.__GroupNormality(key, group, df_groupNormality)
statsDataDict['GroupNormality'] = df_groupNormality
if parametric:
if group_mean_CI:
df_grpMeanCI = self.__GroupMeanCI(key, group, df_grpMeanCI, group_alpha_CI)
statsDataDict['GroupMeanCI'] = df_grpMeanCI
else:
if group_median_CI:
df_grpMedianCI = self.__GroupMedianCI(key, group, df_grpMedianCI, group_alpha_CI)
statsDataDict['GroupMedianCI'] = df_grpMedianCI
if key != control_group_name and control_group_name is not None:
# Merge group and control, accounting for different array lengths by replacing with nan (indices need to be the same length for bootstrapping)
groupPairDict = dict(controlGroup=controlGroup, caseGroup=group)
groupPair = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in groupPairDict.items() ]))
controlList = np.array(groupPair['controlGroup'].values)
caseList = np.array(groupPair['caseGroup'].values)
groupList = list(zip(controlList, caseList))
if parametric:
if ttest_twoGroup:
# T-test statistic calculation for two samples (one always being the control)
TTEST_twoGroup_statistic, TTEST_twoGroup_pvalue = self.__TTEST_twoGroup(groupList)
if mean_fold_change:
meanFoldChange = self.__mean_fold(groupList)
# Boostrap for confidence intervals for the mean fold change
if ((len(group) > 2) and (len(controlGroup) > 2)):
meanFold = lambda x: self.__mean_fold(x)
CIs = bootstrap.ci(data=groupList, statfunction=meanFold, n_samples=500, alpha=fold_change_alpha_CI)
else:
CIs = [np.nan, np.nan]
else:
if mann_whitney_u_test:
# Mann-Whitney U statistic calculation for two samples (one always being the control)
MannWhitney_statistic, MannWhitney_pvalue = self.__MANN_WHITNEY_U(groupList)
if median_fold_change:
medianFoldChange = self.__median_fold(groupList)
# Boostrap for confidence intervals for the median fold change
if ((len(group) > 2) and (len(controlGroup) > 2)):
medianFold = lambda x: self.__median_fold(x)
CIs = bootstrap.ci(data=groupList, statfunction=medianFold, n_samples=500, alpha=fold_change_alpha_CI)
else:
CIs = [np.nan, np.nan]
if levene_twoGroup:
# Levene statistic calculation for two samples (one always being the control)
LEVENE_twoGroup_statistic, LEVENE_twoGroup_pvalue = self.__LEVENE_twoGroup(groupList)
if parametric:
ttest_twoGroup_statistics_name = 'TTEST-twoGroup_statistic_' + str(key)
ttest_twoGroup_pvalue_name = 'TTEST-twoGroup_pvalue_' + str(key)
ttestTitles.append(ttest_twoGroup_statistics_name)
ttestTitles.append(ttest_twoGroup_pvalue_name)
mean_fold_change_name = 'MeanFoldChange_' + str(key)
mean_fold_change_name_CIlower = 'MeanFoldChange_CI_lower_' + str(key)
mean_fold_change_name_CIupper = 'MeanFoldChange_CI_upper_' + str(key)
mean_fold_change_name_sig = 'MeanFoldChange_sig_' + str(key)
else:
mannwhitney_statistic_name = 'MannWhitneyU_statistic_' + str(key)
mannwhitney_pvalue_name = 'MannWhitneyU_pvalue_' + str(key)
mannWhitneyTitles.append(mannwhitney_statistic_name)
mannWhitneyTitles.append(mannwhitney_pvalue_name)
median_fold_change_name = 'MedianFoldChange_' + str(key)
median_fold_change_name_CIlower = 'MedianFoldChange_CI_lower_' + str(key)
median_fold_change_name_CIupper = 'MedianFoldChange_CI_upper_' + str(key)
median_fold_change_name_sig = 'MedianFoldChange_sig_' + str(key)
levene_twoGroup_statistics_name = 'LEVENE-twoGroup_statistic_' + str(key)
levene_twoGroup_pvalue_name = 'LEVENE-twoGroup_pvalue_' + str(key)
leveneTwoGroupTitles.append(levene_twoGroup_statistics_name)
leveneTwoGroupTitles.append(levene_twoGroup_pvalue_name)
if ttest_twoGroup and parametric:
if df_ttest.empty:
df_ttest = pd.DataFrame({ttest_twoGroup_statistics_name: [TTEST_twoGroup_statistic], ttest_twoGroup_pvalue_name: [TTEST_twoGroup_pvalue]})
else:
df_ttest = pd.concat([df_ttest, pd.DataFrame({ttest_twoGroup_statistics_name: [TTEST_twoGroup_statistic], ttest_twoGroup_pvalue_name: [TTEST_twoGroup_pvalue]})], axis=1).reset_index(drop=True)
statsDataDict['TTEST-twoGroup'] = df_ttest
if mann_whitney_u_test and not parametric:
if df_mannWhitney.empty:
df_mannWhitney = pd.DataFrame({mannwhitney_statistic_name: [MannWhitney_statistic], mannwhitney_pvalue_name: [MannWhitney_pvalue]})
else:
df_mannWhitney = pd.concat([df_mannWhitney, pd.DataFrame({mannwhitney_statistic_name: [MannWhitney_statistic], mannwhitney_pvalue_name: [MannWhitney_pvalue]})], axis=1).reset_index(drop=True)
statsDataDict['MannWhitneyU'] = df_mannWhitney
if mean_fold_change and parametric:
sigMeanFold = np.add(np.sign(np.multiply(CIs[0], CIs[1])), 1).astype(bool);
if df_meanFold.empty:
df_meanFold = pd.DataFrame({mean_fold_change_name: [meanFoldChange], mean_fold_change_name_CIlower: CIs[0], mean_fold_change_name_CIupper: CIs[1], mean_fold_change_name_sig: [sigMeanFold]})
else:
df_meanFold = pd.concat([df_meanFold, pd.DataFrame({mean_fold_change_name: [meanFoldChange], mean_fold_change_name_CIlower: CIs[0], mean_fold_change_name_CIupper: CIs[1], mean_fold_change_name_sig: [sigMeanFold]})], axis=1).reset_index(drop=True)
statsDataDict['MeanFoldChange'] = df_meanFold
if median_fold_change and not parametric:
sigMedianFold = np.add(np.sign(np.multiply(CIs[0], CIs[1])), 1).astype(bool);
if df_medianFold.empty:
df_medianFold = pd.DataFrame({median_fold_change_name: [medianFoldChange], median_fold_change_name_CIlower: CIs[0], median_fold_change_name_CIupper: CIs[1], median_fold_change_name_sig: [sigMedianFold]})
else:
df_medianFold = pd.concat([df_medianFold, pd.DataFrame({median_fold_change_name: [medianFoldChange], median_fold_change_name_CIlower: CIs[0], median_fold_change_name_CIupper: CIs[1], median_fold_change_name_sig: [sigMedianFold]})], axis=1).reset_index(drop=True)
statsDataDict['MedianFoldChange'] = df_medianFold
if levene_twoGroup:
if df_levene_twoGroup.empty:
df_levene_twoGroup = pd.DataFrame({levene_twoGroup_statistics_name: [LEVENE_twoGroup_statistic], levene_twoGroup_pvalue_name: [LEVENE_twoGroup_pvalue]})
else:
df_levene_twoGroup = pd.concat([df_levene_twoGroup, pd.DataFrame({levene_twoGroup_statistics_name: [LEVENE_twoGroup_statistic], levene_twoGroup_pvalue_name: [LEVENE_twoGroup_pvalue]})], axis=1).reset_index(drop=True)
statsDataDict['LEVENE-twoGroup'] = df_levene_twoGroup
# Filter dictionary for empty values
groupDict_filt = {}
for key, group in groupDict.items():
if (len(group) > 0):
groupDict_filt[key] = group
# One-way Anova and Kruskal-Wallis test for each group
if oneway_Anova_test and parametric and group_column_name is not None:
df_onewayANOVA = self.__oneWayANOVA(groupDict_filt)
statsDataDict['One-way ANOVA'] = df_onewayANOVA
if kruskal_wallis_test and not parametric and group_column_name is not None:
df_KW = self.__kruskalWallis(groupDict_filt)
statsDataDict['Kruskal-Wallis'] = df_KW
if levene_allGroup and group_column_name is not None:
df_levene_allGroup = self.__LEVENE_allGroup(groupDict_filt)
statsDataDict['LEVENE-allGroup'] = df_levene_allGroup
peak = datatable[[peakName]]
if total_missing:
df_totalMissing = self.__TotalMissing_Calc(peak);
statsDataDict['TotalMissing'] = df_totalMissing
pList = peak.values
pList = pList[~np.isnan(pList)]
if normality_test:
df_normality = self.__normality(pList);
statsDataDict['Normality'] = df_normality
if ttest_oneGroup:
df_TTEST = self.__TTEST_oneGroup(pList)
statsDataDict['TTEST-oneGroup'] = df_TTEST
if statsData.empty:
if statsDataDict:
statsData = pd.concat(list(statsDataDict.values()), axis=1)
else:
if statsDataDict:
statsData = pd.concat([statsData, pd.concat(list(statsDataDict.values()), axis=1)], axis=0).reset_index(drop=True)
if ttest_oneGroup and parametric:
TTEST_qvalueData = pd.DataFrame()
pvals = statsData['TTEST-oneGroup_pvalue'].values.flatten()
mask = np.isfinite(pvals)
pval_masked = [x for x in compress(pvals, mask)]
TTEST_BHFDR_qval = np.empty(len(pvals))
TTEST_BHFDR_qval.fill(np.nan)
_, TTEST_BHFDR_qval[mask] = smt.multipletests(pval_masked, alpha=0.05, method='fdr_bh')[:2]
TTEST_qvalueData['TTEST-oneGroup_BHFDR_qvalue'] = pd.Series(TTEST_BHFDR_qval)
statsData = pd.merge(statsData, TTEST_qvalueData, left_index=True, right_index=True)
if ttest_twoGroup and parametric and group_column_name is not None:
TTEST_qvalueData = pd.DataFrame()
ttestTitles_pvalues = ttestTitles[1:len(ttestTitles):2]
ttestQvalueNames = []
for val in ttestTitles_pvalues:
pvals = statsData[val].values.flatten()
mask = np.isfinite(pvals)
pval_masked = [x for x in compress(pvals, mask)]
Ttest_BHFDR_qval = np.empty(len(pvals))
Ttest_BHFDR_qval.fill(np.nan)
_, Ttest_BHFDR_qval[mask] = smt.multipletests(pval_masked, alpha=0.05, method='fdr_bh')[:2]
val_BHFDR_qvalue = val.replace('pvalue', 'BHFDR_qvalue')
ttestQvalueNames.append(val_BHFDR_qvalue)
TTEST_qvalueData[val_BHFDR_qvalue] = pd.Series(Ttest_BHFDR_qval)
statsData = pd.merge(statsData, TTEST_qvalueData, left_index=True, right_index=True)
if oneway_Anova_test and parametric and group_column_name is not None:
onewayANOVA_qvalueData = pd.DataFrame()
pvals = statsData['onewayANOVA_pvalue'].values.flatten()
mask = np.isfinite(pvals)
pval_masked = [x for x in compress(pvals, mask)]
onewayANOVA_BHFDR_qval = np.empty(len(pvals))
onewayANOVA_BHFDR_qval.fill(np.nan)
onewayANOVA_BYFDR_qval = np.empty(len(pvals))
onewayANOVA_BYFDR_qval.fill(np.nan)
_, onewayANOVA_BHFDR_qval[mask] = smt.multipletests(pval_masked, alpha=0.05, method='fdr_bh')[:2]
onewayANOVA_qvalueData['onewayANOVA_BHFDR_qvalue'] = pd.Series(onewayANOVA_BHFDR_qval)
statsData = pd.merge(statsData, onewayANOVA_qvalueData, left_index=True, right_index=True)
if kruskal_wallis_test and not parametric and group_column_name is not None:
KW_qvalueData = pd.DataFrame()
pvals = statsData['Kruskal–Wallis_pvalue'].values.flatten()
mask = np.isfinite(pvals)
pval_masked = [x for x in compress(pvals, mask)]
KW_BHFDR_qval = np.empty(len(pvals))
KW_BHFDR_qval.fill(np.nan)
KW_BYFDR_qval = np.empty(len(pvals))
KW_BYFDR_qval.fill(np.nan)
_, KW_BHFDR_qval[mask] = smt.multipletests(pval_masked, alpha=0.05, method='fdr_bh')[:2]
KW_qvalueData['Kruskal-Wallis_BHFDR_qvalue'] = pd.Series(KW_BHFDR_qval)
statsData = pd.merge(statsData, KW_qvalueData, left_index=True, right_index=True)
if mann_whitney_u_test and not parametric and group_column_name is not None:
MannWhitney_qvalueData = pd.DataFrame()
mannWhitneyTitles_pvalues = mannWhitneyTitles[1:len(mannWhitneyTitles):2]
mannWhitneyQvalueNames = []
for val in mannWhitneyTitles_pvalues:
pvals = statsData[val].values.flatten()
mask = np.isfinite(pvals)
pval_masked = [x for x in compress(pvals, mask)]
MannWhitney_BHFDR_qval = np.empty(len(pvals))
MannWhitney_BHFDR_qval.fill(np.nan)
_, MannWhitney_BHFDR_qval[mask] = smt.multipletests(pval_masked, alpha=0.05, method='fdr_bh')[:2]
val_BHFDR_qvalue = val.replace('pvalue', 'BHFDR_qvalue')
mannWhitneyQvalueNames.append(val_BHFDR_qvalue)
MannWhitney_qvalueData[val_BHFDR_qvalue] = pd.Series(MannWhitney_BHFDR_qval)
statsData = pd.merge(statsData, MannWhitney_qvalueData, left_index=True, right_index=True)
if pca_loadings:
peakData = datatable[peakNames].reset_index(drop=True)
d_filled = imputeData(peakData, 3)
#pca, pca_x, pca_loadings = self.__PCA_Calc(d_filled)
pca, pca_loadings = self.__PCA_Calc(d_filled)
df_pca_components = pd.DataFrame(pca_loadings, columns=['PC1', 'PC2'])
bootpc1 = lambda x: self.__boot_pca(x, pca.components_.T, 1)
bootpc2 = lambda x: self.__boot_pca(x, pca.components_.T, 2)
PC1_CIs = bootstrap.ci(data=d_filled, statfunction=bootpc1, n_samples=500, alpha=pca_alpha_CI)
PC2_CIs = bootstrap.ci(data=d_filled, statfunction=bootpc2, n_samples=500, alpha=pca_alpha_CI)
pc1_lower = np.array(PC1_CIs[0, :]).flatten()
pc1_upper = np.array(PC1_CIs[1, :]).flatten()
pc2_lower = np.array(PC2_CIs[0, :]).flatten()
pc2_upper = np.array(PC2_CIs[1, :]).flatten()
sigPC1 = np.add(np.sign(np.multiply(pc1_lower, pc1_upper)), 1).astype(bool);
sigPC2 = np.add(np.sign(np.multiply(pc2_lower, pc2_upper)), 1).astype(bool);
df_pca_stats = pd.DataFrame({"PC1_lower": pc1_lower, "PC1_upper": pc1_upper, "PC1_sig": sigPC1,
"PC2_lower": pc2_lower, "PC2_upper": pc2_upper, "PC2_sig": sigPC2})
if not statsData.empty:
df_pca = pd.merge(statsData, df_pca_components, left_index=True, right_index=True);
statsData = pd.merge(df_pca, df_pca_stats, left_index=True, right_index=True);
else:
statsData = pd.merge(df_pca_components, df_pca_stats, left_index=True, right_index=True);
if not statsData.empty:
statsData = pd.merge(peaktable.reset_index(drop=True), statsData, left_index=True, right_index=True)
else:
statsData = peaktable.copy()
return statsData
def __checkData(self, df):
if not isinstance(df, pd.DataFrame):
print("Error: A dataframe was not entered. Please check your data.")
return df
def __checkPeakTable(self, PeakTable):
if "Name" not in PeakTable.columns:
print("Error: \"Name\" column not in Peak Table. Please check your data.")
sys.exit()
if "Label" not in PeakTable.columns:
print("Error: \"Label\" column not in Peak Table. Please check your data.")
sys.exit()
# Do not assume the peaks/nodes have been indexed correctly. Remove any index columns and reindex.
column_list = [column.lower() for column in PeakTable.columns]
if 'idx' in column_list:
index = column_list.index('idx')
column_name = PeakTable.columns[index]
PeakTable = PeakTable.drop(columns=[column_name])
if 'index' in column_list:
index = column_list.index('index')
column_name = PeakTable.columns[index]
PeakTable = PeakTable.drop(columns=[column_name])
PeakTable = PeakTable.reset_index(drop=True)
PeakTable.index.name = 'Idx'
PeakTable = PeakTable.reset_index()
return PeakTable
def __paramCheck(self, parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test):
peaks = self.__peaktable
data = self.__datatable
meta = data.T[~data.T.index.isin(peaks['Name'])].T.reset_index(drop=True)
col_list = list(meta.columns)
if group_column_name is not None:
group_names = list(set(list(meta[group_column_name].values)))
else:
group_names = []
if not isinstance(parametric, bool):
print("Error: Parametric not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(log_data, tuple):
print("Error: Log data type if not a tuple. Please ensure the value is a tuple (e.g. (True, 2).")
sys.exit()
else:
(log_bool, log_base) = log_data
if not isinstance(log_bool, bool):
print("Error: Log data first tuple item is not a boolean value. Choose either \"True\" or \"False\".")
sys.exit()
base_types = ['natural', 2, 10]
if isinstance(log_base, str):
log_base = log_base.lower()
if log_base not in base_types:
print("Error: Log data second tuple item is not valid. Choose one of {}.".format(', '.join(base_types)))
sys.exit()
if not isinstance(scale_data, tuple):
print("Error: Scale data type if not a tuple. Please ensure the value is a tuple (e.g. (True, 'standard').")
sys.exit()
else:
(scale_bool, scale_type) = scale_data
if not isinstance(scale_bool, bool):
print("Error: Scale data first tuple item is not a boolean value. Choose either \"True\" or \"False\".")
sys.exit()
scale_types = ['standard', 'minmax', 'maxabs', 'robust']
if isinstance(scale_type, str):
scale_type = scale_type.lower()
if scale_type not in scale_types:
print("Error: Scale data second tuple item is not valid. Choose one of {}.".format(
', '.join(scale_types)))
sys.exit()
if not isinstance(impute_data, tuple):
print("Error: Impute data type if not a tuple. Please ensure the value is a tuple (e.g. (True, 3).")
sys.exit()
else:
(impute_bool, k) = impute_data
if not isinstance(impute_bool, bool):
print("Error: Impute data first tuple item is not a boolean value. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(k, float):
if not isinstance(k, int):
print("Error: Impute data second tuple item, the nearest neighbours k value, is not valid. Choose a float or integer value.")
sys.exit()
if group_column_name is not None:
if not isinstance(group_column_name, str):
print("Error: Group column name is not valid. Choose a string value.")
sys.exit()
else:
if group_column_name not in col_list:
print("Error: Group column name not valid. Choose one of {}.".format(', '.join(col_list)))
sys.exit()
if control_group_name is not None:
if not isinstance(control_group_name, str):
print("Error: Control group name is not valid. Choose a string value.")
sys.exit()
else:
if control_group_name not in group_names:
print("Error: Control group name not valid. Choose one of {}.".format(', '.join(group_names)))
sys.exit()
if not isinstance(group_alpha_CI, float):
print("Error: Group alpha confidence interval is not valid. Choose a float value.")
sys.exit()
if not isinstance(fold_change_alpha_CI, float):
print("Error: Mean/Median fold change alpha confidence interval is not valid. Choose a float value.")
sys.exit()
if not isinstance(pca_alpha_CI, float):
print("Error: PCA alpha confidence interval is not valid. Choose a float value.")
sys.exit()
if not isinstance(total_missing, bool):
print("Error: Total missing is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(group_missing, bool):
print("Error: Group missing is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(pca_loadings, bool):
print("Error: PCA loadings is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(normality_test, bool):
print("Error: Normality test is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(group_normality_test, bool):
print("Error: Group normality test is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(group_mean_CI, bool):
print("Error: Group mean confidence interval is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(group_median_CI, bool):
print("Error: Group median confidence interval is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(mean_fold_change, bool):
print("Error: Mean fold change is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(median_fold_change, bool):
print("Error: Median fold change is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(oneway_Anova_test, bool):
print("Error: One-way Anova test is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(kruskal_wallis_test, bool):
print("Error: Kruskal–Wallis test is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(levene_twoGroup, bool):
print("Error: Levene two group is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(levene_allGroup, bool):
print("Error: Levene all group is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(ttest_oneGroup, bool):
print("Error: T-test one group is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(ttest_twoGroup, bool):
print("Error: T-test two group is not valid. Choose either \"True\" or \"False\".")
sys.exit()
if not isinstance(mann_whitney_u_test, bool):
print("Error: Mann–Whitney U test is not valid. Choose either \"True\" or \"False\".")
sys.exit()
return parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test
def __mean_fold(self, groupList):
(controlGroup, caseGroup) = zip(*groupList)
if ((len(list(caseGroup)) > 0) and (len(list(controlGroup)) > 0)):
meanFoldChange = np.nanmean(list(caseGroup)) / np.nanmean(list(controlGroup))
else:
meanFoldChange = np.nan
return meanFoldChange
def __median_fold(self, groupList):
(controlGroup, caseGroup) = zip(*groupList)
if ((len(list(caseGroup)) > 0) and (len(list(controlGroup)) > 0)):
medianFoldChange = np.nanmedian(list(caseGroup)) / np.nanmedian(list(controlGroup))
else:
medianFoldChange = np.nan
return medianFoldChange
def __PCA_Calc(self, data):
pca = PCA(n_components=2)
pca.fit_transform(data)
return pca, pca.components_.T
def __TotalMissing_Calc(self, peak):
missing = peak.isnull().sum()
totalMissing = np.multiply(np.divide(missing, peak.shape[0]).tolist(), 100)
df_totalMissing = | pd.DataFrame({'Percent_Total_Missing': totalMissing}) | pandas.DataFrame |
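# The class above repeats one pattern many times: mask out NaN p-values, run
# Benjamini-Hochberg FDR on the finite ones, and write the q-values back in place.
# Isolated, that pattern is:
import numpy as np
import statsmodels.stats.multitest as smt
pvals = np.array([0.001, 0.04, np.nan, 0.2, 0.03])
qvals = np.full(pvals.shape, np.nan)
mask = np.isfinite(pvals)
_, qvals[mask] = smt.multipletests(pvals[mask], alpha=0.05, method='fdr_bh')[:2]
print(qvals)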
import codecs
import math
import os
import re
import gensim
import jieba.posseg as jieba
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
# return the feature word vectors for the given word list
def getWordVecs(wordList, model):
name = []
vecs = []
for word in wordList:
word = word.replace('\n', '')
try:
if word in model: # the model contains a vector for this word
name.append(word)
vecs.append(model[word])
except KeyError:
continue
a = | pd.DataFrame(name, columns=['word']) | pandas.DataFrame |
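# getWordVecs() above collects (word, vector) pairs so they can be clustered with the
# imported KMeans; a self-contained sketch of that downstream step on random stand-in
# vectors (the real vectors would come from the gensim model):
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
words = ['alpha', 'beta', 'gamma', 'delta']
vecs = np.random.rand(len(words), 50)
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(vecs)
print(pd.DataFrame({'word': words, 'cluster': km.labels_}))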
import matplotlib.image as mpimg
import matplotlib.style as style
import matplotlib.pyplot as plt
from matplotlib import rcParams
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import seaborn as sns
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def fix_cap_remove_ace(pdb_file):
"""
Removes the H atoms of the capped ACE residue.
"""
remove_words = [
"H1 ACE",
"H2 ACE",
"H3 ACE",
"H31 ACE",
"H32 ACE",
"H33 ACE",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_ace(pdb_file):
"""
Replaces the alpha carbon atom of the
capped ACE residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA ACE", "CH3 ACE")
data = data.replace("C ACE", "CH3 ACE")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def fix_cap_remove_nme(pdb_file):
"""
Removes the H atoms of the capped NME residue.
"""
remove_words = [
"H1 NME",
"H2 NME",
"H3 NME",
"H31 NME",
"H32 NME",
"H33 NME",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_nme(pdb_file):
"""
Replaces the alpha carbon atom of the
capped NME residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA NME", "CH3 NME")
data = data.replace("C NME", "CH3 NME")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
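# Usage sketch: these cap-fixing helpers are applied in pairs on a capped-peptide PDB,
# e.g. as done later in create_starting_structures():
#   fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
#   fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# which strips the cap hydrogens and renames the cap carbon so tleap can rebuild the cap atoms.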
def prepare_alanine_dipeptide():
"""
Prepares the alanine dipeptide system for Gaussian
Accelerated Molecular Dynamics (GaMD) simulations.
Downloads the pdb structure from
https://markovmodel.github.io/mdshare/ALA2/ and
    parameterizes it with the Amber ff14SB force field
    and TIP3P water via tleap.
"""
os.system(
"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb"
)
os.system(
"rm -rf system_inputs"
) # Removes any existing directory named system_inputs
os.system("mkdir system_inputs") # Creates a directory named system_inputs
cwd = os.getcwd()
target_dir = cwd + "/" + "system_inputs"
os.system("pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb")
# Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)
remove_words = ["HH31 ACE", "HH32 ACE", "HH33 ACE"]
with open("intermediate.pdb") as oldfile, open(
"system.pdb", "w"
) as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
os.system("rm -rf intermediate*")
# save the tleap script to file
with open("input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system.pdb
solvateBox pdb TIP3PBOX 15
saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd
saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7
savepdb pdb system_TIP3P.pdb
quit
"""
)
os.system("tleap -f input_TIP3P.leap")
os.system("rm -rf leap.log")
shutil.copy(
cwd + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_TIP3P.pdb", target_dir + "/" + "system_TIP3P.pdb"
)
shutil.copy(
cwd + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_TIP3P.rst7", target_dir + "/" + "system_TIP3P.rst7"
)
shutil.copy(cwd + "/" + "system.pdb", target_dir + "/" + "system.pdb")
shutil.copy(
cwd + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "input_TIP3P.leap", target_dir + "/" + "input_TIP3P.leap"
)
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf input_TIP3P.leap")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
def create_vectors(x):
"""
    Extracts periodic box information from the
given line.
"""
x = str(x)
x = x.replace("Vec3", "")
x = re.findall("\d*\.?\d+", x)
for i in range(0, len(x)):
x[i] = float(x[i])
x = tuple(x)
n = int(len(x) / 3)
x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]
return x
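# Example (hypothetical repr of an OpenMM box): for
#   s = "(Vec3(x=2.5, y=0.0, z=0.0), Vec3(x=0.0, y=2.5, z=0.0), Vec3(x=0.0, y=0.0, z=2.5))"
# create_vectors(s) returns [(2.5, 0.0, 0.0), (0.0, 2.5, 0.0), (0.0, 0.0, 2.5)] (values in nm).
# The regex only captures unsigned decimal numbers, so the input is assumed to hold exactly
# nine non-negative box components and no exponent notation.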
def simulated_annealing(
parm="system_TIP3P.prmtop",
rst="system_TIP3P.inpcrd",
annealing_output_pdb="system_annealing_output.pdb",
annealing_steps=100000,
pdb_freq=100000,
starting_temp=0,
target_temp=300,
temp_incr=3,
):
"""
Performs simulated annealing of the system from
0K to 300 K (default) using OpenMM MD engine and
saves the last frame of the simulation to be
accessed by the next simulation.
Parameters
----------
parm: str
System's topology file
rst: str
System's coordinate file
annealing_output_pdb: str
System's output trajectory file
annealing_steps: int
        Annealing steps at each temperature jump
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
starting_temp: int
Initial temperature of Simulated Annealing
target_temp: int
Final temperature of Simulated Annealing
temp_incr: int
        Temperature increment for every step
"""
prmtop = AmberPrmtopFile(parm)
inpcrd = AmberInpcrdFile(rst)
annealing_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
annealing_integrator = LangevinIntegrator(
0 * kelvin, 1 / picosecond, 2 * femtoseconds
)
total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
annealing_temp_range = int((target_temp / temp_incr) + 1)
annealing_platform = Platform.getPlatformByName("CUDA")
annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
annealing_simulation = Simulation(
prmtop.topology,
annealing_system,
annealing_integrator,
annealing_platform,
annealing_properties,
)
annealing_simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
annealing_simulation.minimizeEnergy()
annealing_simulation.reporters.append(
PDBReporter(annealing_output_pdb, pdb_freq)
)
simulated_annealing_last_frame = (
annealing_output_pdb[:-4] + "_last_frame.pdb"
)
annealing_simulation.reporters.append(
PDBReporter(simulated_annealing_last_frame, total_steps)
)
annealing_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=total_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
temp = starting_temp
while temp <= target_temp:
annealing_integrator.setTemperature(temp * kelvin)
if temp == starting_temp:
annealing_simulation.step(annealing_steps)
annealing_simulation.saveState("annealing.state")
else:
annealing_simulation.loadState("annealing.state")
annealing_simulation.step(annealing_steps)
temp += temp_incr
state = annealing_simulation.context.getState()
print(state.getPeriodicBoxVectors())
annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
print(annealing_simulation_box_vectors)
with open("annealing_simulation_box_vectors.pkl", "wb") as f:
pk.dump(annealing_simulation_box_vectors, f)
print("Finshed NVT Simulated Annealing Simulation")
def npt_equilibration(
parm="system_TIP3P.prmtop",
npt_output_pdb="system_npt_output.pdb",
pdb_freq=500000,
npt_steps=5000000,
target_temp=300,
npt_pdb="system_annealing_output_last_frame.pdb",
):
"""
Performs NPT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
npt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
npt_steps: int
NPT simulation steps
target_temp: int
Temperature for MD simulation
npt_pdb: str
Last frame of the simulation
"""
npt_init_pdb = PDBFile(npt_pdb)
prmtop = AmberPrmtopFile(parm)
npt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
npt_system.addForce(barostat)
npt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
npt_platform = Platform.getPlatformByName("CUDA")
npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
npt_simulation = Simulation(
prmtop.topology,
npt_system,
npt_integrator,
npt_platform,
npt_properties,
)
npt_simulation.context.setPositions(npt_init_pdb.positions)
npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("annealing_simulation_box_vectors.pkl", "rb") as f:
annealing_simulation_box_vectors = pk.load(f)
annealing_simulation_box_vectors = create_vectors(
annealing_simulation_box_vectors
)
npt_simulation.context.setPeriodicBoxVectors(
annealing_simulation_box_vectors[0],
annealing_simulation_box_vectors[1],
annealing_simulation_box_vectors[2],
)
npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
npt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=npt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
npt_simulation.minimizeEnergy()
npt_simulation.step(npt_steps)
npt_simulation.saveState("npt_simulation.state")
state = npt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
npt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(npt_simulation_box_vectors)
with open("npt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(npt_simulation_box_vectors, f)
print("Finished NPT Simulation")
def nvt_equilibration(
parm="system_TIP3P.prmtop",
nvt_output_pdb="system_nvt_output.pdb",
pdb_freq=500000,
nvt_steps=5000000,
target_temp=300,
nvt_pdb="system_npt_output_last_frame.pdb",
):
"""
Performs NVT equilibration MD of the system
    using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
nvt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
nvt_steps: int
NVT simulation steps
target_temp: int
Temperature for MD simulation
nvt_pdb: str
Last frame of the simulation
"""
nvt_init_pdb = PDBFile(nvt_pdb)
prmtop = AmberPrmtopFile(parm)
nvt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
nvt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
nvt_platform = Platform.getPlatformByName("CUDA")
nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
nvt_simulation = Simulation(
prmtop.topology,
nvt_system,
nvt_integrator,
nvt_platform,
nvt_properties,
)
nvt_simulation.context.setPositions(nvt_init_pdb.positions)
nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("npt_simulation_box_vectors.pkl", "rb") as f:
npt_simulation_box_vectors = pk.load(f)
npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
nvt_simulation.context.setPeriodicBoxVectors(
npt_simulation_box_vectors[0],
npt_simulation_box_vectors[1],
npt_simulation_box_vectors[2],
)
nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
nvt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=nvt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
nvt_simulation.minimizeEnergy()
nvt_simulation.step(nvt_steps)
nvt_simulation.saveState("nvt_simulation.state")
state = nvt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(nvt_simulation_box_vectors)
with open("nvt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(nvt_simulation_box_vectors, f)
print("Finished NVT Simulation")
def run_equilibration():
"""
Runs systematic simulated annealing followed by
NPT and NVT equilibration MD simulation.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "equilibration"
os.system("rm -rf equilibration")
os.system("mkdir equilibration")
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.pdb",
target_dir + "/" + "system_TIP3P.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.rst7",
target_dir + "/" + "system_TIP3P.rst7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system.pdb",
target_dir + "/" + "system.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "input_TIP3P.leap",
target_dir + "/" + "input_TIP3P.leap",
)
os.chdir(target_dir)
simulated_annealing()
npt_equilibration()
nvt_equilibration()
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
os.system("rm -rf input_TIP3P.leap")
os.chdir(cwd)
def create_starting_structures():
"""
Prepares starting structures for Amber GaMD simulations.
All input files required to run Amber GaMD simulations are
placed in the starting_structures directory.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
os.system("rm -rf starting_structures")
os.system("mkdir starting_structures")
shutil.copy(
cwd + "/" + "equilibration" + "/" + "system_nvt_output_last_frame.pdb",
target_dir + "/" + "system_nvt_output_last_frame.pdb",
)
os.chdir(target_dir)
fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# Save the tleap script to file
with open("final_input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system_nvt_output_last_frame.pdb
saveamberparm pdb system_final.prmtop system_final.inpcrd
saveamberparm pdb system_final.parm7 system_final.rst7
savepdb pdb system_final.pdb
quit
"""
)
os.system("tleap -f final_input_TIP3P.leap")
os.system("rm -rf leap.log")
os.system("rm -rf system_nvt_output_last_frame.pdb")
os.chdir(cwd)
def add_vec_inpcrd():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
(nvt_simulation_box_vectors[0][0]) * 10,
(nvt_simulation_box_vectors[1][1]) * 10,
(nvt_simulation_box_vectors[2][2]) * 10,
)
vectors = (
round(vectors[0], 7),
round(vectors[1], 7),
round(vectors[2], 7),
)
last_line = (
" "
+ str(vectors[0])
+ " "
+ str(vectors[1])
+ " "
+ str(vectors[2])
+ " 90.0000000"
+ " 90.0000000"
+ " 90.0000000"
)
with open("system_final.inpcrd", "a+") as f:
f.write(last_line)
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
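# The record appended by add_vec_inpcrd() looks roughly like (hypothetical 25 Angstrom box):
#  25.0000000  25.0000000  25.0000000  90.0000000  90.0000000  90.0000000
# i.e. the three box lengths in Angstrom (nm values from OpenMM multiplied by 10)
# followed by the three box angles of an orthorhombic cell.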
def add_vec_prmtop():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the prmtop file.
Only to be used when the box dimensions are not
present in the prmtop file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
nvt_simulation_box_vectors[0][0],
nvt_simulation_box_vectors[1][1],
nvt_simulation_box_vectors[2][2],
)
vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)
oldbeta = "9.00000000E+01"
x = str(vectors[0]) + str(0) + "E+" + "01"
y = str(vectors[1]) + str(0) + "E+" + "01"
z = str(vectors[2]) + str(0) + "E+" + "01"
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
line3 = " " + oldbeta + " " + x + " " + y + " " + z
with open("system_final.prmtop") as i, open(
"system_intermediate_final.prmtop", "w"
) as f:
for line in i:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f.write(line)
os.system("rm -rf system_final.prmtop")
os.system("mv system_intermediate_final.prmtop system_final.prmtop")
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def create_filetree(
nst_lim=26000000,
ntw_x=1000,
nt_cmd=1000000,
n_teb=1000000,
n_tave=50000,
ntcmd_prep=200000,
nteb_prep=200000,
):
"""
Creates a directory named gamd_simulations. Inside
this directory, there are subdirectories for dihedral,
dual and total potential-boosted GaMD with upper and
lower threshold boosts separately.
Parameters
----------
nst_lim: int
Total simulation time including preparatory simulation.
For example, if nst_lim = 26000000, then, we may have
2 ns of preparatory simulation i.e. 1000000 preparation steps
and 50 ns of GaMD simulation i.e. 25000000 simulation steps
ntw_x: int
Saving coordinates of the simulation every ntw_x
timesteps. For example, 2 ps implies 1000 timesteps
nt_cmd: int
        Number of initial conventional MD simulation steps; 2 ns of
preparatory simulation requires 1000000 preparation
timesteps
n_teb: int
Number of biasing MD simulation steps
n_tave: int
Number of simulation steps used to calculate the
average and standard deviation of potential energies
ntcmd_prep: int
Number of preparation conventional molecular dynamics
        steps. This is used for system equilibration, and
potential energies are not collected for statistics
nteb_prep: int
Number of preparation biasing molecular dynamics
simulation steps. This is used for system
equilibration
"""
cwd = os.getcwd()
os.system("rm -rf gamd_simulations")
os.system("mkdir gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations")
source_dir = cwd + "/" + "starting_structures"
target_dir = cwd + "/" + "gamd_simulations"
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
for i in range(len(dir_list)):
os.mkdir(dir_list[i])
os.chdir(target_dir + "/" + dir_list[i])
shutil.copy(
source_dir + "/" + "system_final.inpcrd",
target_dir + "/" + dir_list[i] + "/" + "system_final.inpcrd",
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + dir_list[i] + "/" + "system_final.prmtop",
)
if "lower" in dir_list[i]:
i_E = 1
if "upper" in dir_list[i]:
i_E = 2
if "total" in dir_list[i]:
i_gamd = 1
if "dihedral" in dir_list[i]:
i_gamd = 2
if "dual" in dir_list[i]:
i_gamd = 3
with open("md.in", "w") as f:
f.write("&cntrl" + "\n")
f.write(" imin = 0, irest = 0, ntx = 1," + "\n")
f.write(" nstlim = " + str(nst_lim) + ", dt = 0.002," + "\n")
f.write(" ntc = 2, ntf = 2, tol = 0.000001," + "\n")
f.write(" iwrap = 1, ntb = 1, cut = 8.0," + "\n")
f.write(" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, " + "\n")
f.write(
" ntpr = 500, ntwx = " + str(ntw_x) + ", ntwr = 500," + "\n"
)
f.write(" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0," + "\n")
f.write(
" igamd = "
+ str(i_gamd)
+ ", iE = "
+ str(i_E)
+ ", irest_gamd = 0,"
+ "\n"
)
f.write(
" ntcmd = "
+ str(nt_cmd)
+ ", nteb = "
+ str(n_teb)
+ ", ntave = "
+ str(n_tave)
+ ","
+ "\n"
)
f.write(
" ntcmdprep = "
+ str(ntcmd_prep)
+ ", ntebprep = "
+ str(nteb_prep)
+ ","
+ "\n"
)
f.write(" sigma0D = 6.0, sigma0P = 6.0" + " \n")
f.write("&end" + "\n")
os.chdir(target_dir)
os.chdir(cwd)
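# Usage sketch: the defaults write md.in files for 52 ns runs (2 ns conventional MD
# followed by 50 ns of GaMD at a 2 fs timestep); a quick smoke test might instead use
#   create_filetree(nst_lim=260000, ntw_x=100, nt_cmd=10000, n_teb=10000,
#                   n_tave=1000, ntcmd_prep=2000, nteb_prep=2000)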
def run_simulations():
"""
Runs GaMD simulations for each of the dihedral, dual and total
potential boosts for both thresholds i.e. upper and lower potential
thresholds. (Remember to check md.in files for further details and
flag information).
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd)
def create_data_files(
jump=10,
traj="system_final.nc",
topology="system_final.prmtop",
T=300,
):
"""
Extracts data from GaMD log files and saves them as
weights.dat, Psi.dat and Phi_Psi.dat. gamd.log file
contains data excluding the initial equilibration MD
    simulation steps, but the trajectory output file contains
    all frames, including those from the initial equilibration
    MD steps. This function accounts for that offset to keep
    the data consistent.
Parameters
----------
jump: int
Every nth frame to be considered for reweighting
traj: str
System's trajectory file
topology: str
System's topology file
T: int
MD simulation temperature
"""
# To make data consistent with gamd.log and .nc file
factor = 0.001987 * T
with open("md.in") as f:
lines = f.readlines()
for i in lines:
if "nstlim =" in i:
nstlim_line = i
if "ntcmd =" in i:
ntcmd_line = i
if "ntwx =" in i:
ntwx_line = i
x = re.findall(r"\b\d+\b", ntcmd_line)
ntcmd = int(x[0])
x = re.findall(r"\b\d+\b", nstlim_line)
nstlim = int(x[0])
x = re.findall(r"\b\d+\b", ntwx_line)
ntwx = int(x[1])
# From the .nc trajectory files, we will not consider ntcmd trajectories
leave_frames = int(ntcmd / ntwx)
no_frames = int(nstlim / ntwx)
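    # Worked example with the md.in defaults written by create_filetree():
    # ntcmd = 1000000, ntwx = 1000, nstlim = 26000000, so
    #   leave_frames = 1000000 / 1000  = 1000   (conventional-MD frames to skip)
    #   no_frames    = 26000000 / 1000 = 26000  (total frames in the .nc trajectory)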
# Recheck conditions
file = open("gamd.log", "r")
number_of_lines = 0
for line in file:
line = line.strip("\n")
number_of_lines += 1
file.close()
f = open("gamd.log")
fourth_line = f.readlines()[3]
if str(ntcmd) in fourth_line:
datapoints = number_of_lines - 4
if not str(ntcmd) in fourth_line:
datapoints = number_of_lines - 3
print(datapoints == int((nstlim - ntcmd) / ntwx))
# Creating Psi.dat and Phi_Psi.dat
traj = md.load(traj, top=topology)
traj = traj[leave_frames:no_frames:jump]
phi = md.compute_phi(traj)
phi = phi[1] # 0:indices, 1:phi angles
phi = np.array([math.degrees(i) for i in phi]) # radians to degrees
psi = md.compute_psi(traj)
psi = psi[1] # 0:indices, 1:psi angles
psi = np.array([math.degrees(i) for i in psi]) # radians to degrees
    df_psi = pd.DataFrame(psi, columns=["Psi"])
df_psi = df_psi.tail(int(datapoints))
df_psi.to_csv("Psi.dat", sep="\t", index=False, header=False)
    df_phi = pd.DataFrame(phi, columns=["Phi"])
df_phi = df_phi.tail(int(datapoints))
df_phi_psi = pd.concat([df_phi, df_psi], axis=1)
df_phi_psi.to_csv("Phi_Psi.dat", sep="\t", index=False, header=False)
# Creating weights.dat
with open("gamd.log") as f:
lines = f.readlines()
column_names = lines[2]
column_names = column_names.replace("#", "")
column_names = column_names.replace("\n", "")
column_names = column_names.replace(" ", "")
column_names = column_names.split(",")
list_words = ["#"]
with open("gamd.log") as oldfile, open("data.log", "w") as newfile:
for line in oldfile:
if not any(word in line for word in list_words):
newfile.write(line)
df = pd.read_csv("data.log", delim_whitespace=True, header=None)
df.columns = column_names
df["dV(kcal/mol)"] = (
df["Boost-Energy-Potential"] + df["Boost-Energy-Dihedral"]
)
df["dV(kbT)"] = df["dV(kcal/mol)"] / factor
df_ = df[["dV(kbT)", "total_nstep", "dV(kcal/mol)"]]
df_ = df_[::jump]
df_.to_csv("weights.dat", sep="\t", index=False, header=False)
os.system("rm -rf data.log")
print(df_phi_psi.shape)
print(df_phi.shape)
print(df_.shape)
def create_bins(lower_bound, width, upper_bound):
"""
    Creates bins spanning the lower to the upper bound
    with the given width.
"""
bins = []
for low in range(lower_bound, upper_bound, width):
bins.append([low, low + width])
return bins
def find_bin(value, bins):
"""
    Finds the index of the bin a value falls into; returns -1 if it lies outside all bins.
"""
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
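# Example (10-degree bins over the dihedral range used below):
#   bins = create_bins(-180, 10, 180)   # [[-180, -170], [-170, -160], ..., [170, 180]]
#   find_bin(-175.3, bins)              # -> 0
#   find_bin(175.3, bins)               # -> 35
#   find_bin(200.0, bins)               # -> -1 (outside the binned range)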
def reweight_1d(
binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001
):
"""
Reweights boosted potential energies in one-dimension based on
Maclaurin series expansion to one, two and three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Psi = pd.read_csv("Psi.dat", delim_whitespace=True, header=None)
df_Psi.columns = ["Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
hist, hist_edges = np.histogram(df_Psi[["Psi"]], bins=binsX, weights=None)
pstarA = [i / sum_total for i in list(hist)]
bins = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
data = df_Psi["Psi"].values.tolist()
binned_weights = []
for value in data:
bin_index = find_bin(value, bins)
binned_weights.append(bin_index)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df = pd.concat([df_index, df_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
####c1
df_c1.to_csv("c1_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_1d.txt", "r") as f1, open("pA_c1_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_1d.txt")
####c12
df_c12.to_csv("c12_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_1d.txt", "r") as f1, open("pA_c12_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_1d.txt")
####c123
df_c123.to_csv("c123_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_1d.txt", "r") as f1, open("pA_c123_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_1d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_1d.txt", "r") as f1, open(
"pA_c1_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_1d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_1d.txt", "r") as f1, open(
"pA_c12_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_1d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_1d.txt", "r") as f1, open(
"pA_c123_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_1d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_1d.txt", "r") as f1, open(
"c1_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_1d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_1d.txt", "r") as f1, open(
"c12_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_1d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_1d.txt", "r") as f1, open(
"c123_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_1d.txt")
####c1
indices_c1_1d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_1d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_1d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_1d.pickle", "wb") as f:
pk.dump(frames_c1_1d, f)
with open("indices_c1_1d.pickle", "wb") as f:
pk.dump(indices_c1_1d, f)
####c12
indices_c12_1d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_1d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_1d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_1d.pickle", "wb") as f:
pk.dump(frames_c12_1d, f)
with open("indices_c12_1d.pickle", "wb") as f:
pk.dump(indices_c12_1d, f)
####c123
indices_c123_1d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_1d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_1d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_1d.pickle", "wb") as f:
pk.dump(frames_c123_1d, f)
with open("indices_c123_1d.pickle", "wb") as f:
pk.dump(indices_c123_1d, f)
##saving probabilities for each selected frame
####c1
prob_c1_1d_list = []
for i in indices_c1_1d:
prob_c1_1d_list.append(df_c1["pA_c1"][i])
prob_c1_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_1d_list
)
)
prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]
with open("prob_c1_1d_list.pickle", "wb") as f:
pk.dump(prob_c1_1d_list, f)
####c12
prob_c12_1d_list = []
for i in indices_c12_1d:
prob_c12_1d_list.append(df_c12["pA_c12"][i])
prob_c12_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_1d_list
)
)
prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]
with open("prob_c12_1d_list.pickle", "wb") as f:
pk.dump(prob_c12_1d_list, f)
####c123
prob_c123_1d_list = []
for i in indices_c123_1d:
prob_c123_1d_list.append(df_c123["pA_c123"][i])
prob_c123_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_1d_list
)
)
prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]
with open("prob_c123_1d_list.pickle", "wb") as f:
pk.dump(prob_c123_1d_list, f)
ref_df_1d = pd.DataFrame(bins, columns=["dim0", "dim1"])
ref_df_1d["bins"] = ref_df_1d.agg(
lambda x: f"[{x['dim0']} , {x['dim1']}]", axis=1
)
ref_df_1d = ref_df_1d[["bins"]]
index_ref_1d = []
for i in range(len(bins)):
index_ref_1d.append(i)
index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=["index"])
df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)
df_ref_1d.to_csv("ref_1d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_1d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def reweight_2d(
binspace=10,
n_structures=4,
Xdim=[-180, 180],
Ydim=[-180, 180],
T=300.0,
min_prob=0.000001,
):
"""
Reweights boosted potential energies in two-dimensions
based on Maclaurin series expansion to one, two and
three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles (1st dimension)
Ydim: list
Range of dihedral angles (2nd dimension)
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Phi_Psi = pd.read_csv("Phi_Psi.dat", delim_whitespace=True, header=None)
df_Phi_Psi.columns = ["Phi", "Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Phi_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)
hist2D, hist_edgesX, hist_edgesY = np.histogram2d(
df_Phi_Psi["Phi"].values.tolist(),
df_Phi_Psi["Psi"].values.tolist(),
bins=(binsX, binsY),
weights=None,
)
pstarA_2D = [i / sum_total for i in list(hist2D)]
bins_tuple_X = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
bins_tuple_Y = create_bins(
lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])
)
bins = []
for i in range(len(bins_tuple_X)):
for j in range(len(bins_tuple_Y)):
bins.append([bins_tuple_X[i], bins_tuple_Y[j]])
pstarA = [item for elem in pstarA_2D for item in elem]
hist = [item for elem in hist2D for item in elem]
hist = [int(i) for i in hist]
data_X = df_Phi_Psi["Phi"].values.tolist()
binned_weights_X = []
for value in data_X:
bin_index_X = find_bin(value, bins_tuple_X)
binned_weights_X.append(bin_index_X)
data_Y = df_Phi_Psi["Psi"].values.tolist()
binned_weights_Y = []
for value in data_Y:
bin_index_Y = find_bin(value, bins_tuple_Y)
binned_weights_Y.append(bin_index_Y)
binned_weights_2D = []
for i in range(len(binned_weights_X)):
binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])
binned_weights = []
for i in range(len(binned_weights_2D)):
binned_weights.append(
(binned_weights_2D[i][0] * len(bins_tuple_Y))
+ (binned_weights_2D[i][1] + 1)
)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df_index["index"] = df_index["index"] - 1
df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
    df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 17:54:55 2020
@author: RredRrobin
"""
import os
import tkinter as tk
import tkinter.filedialog as filedialog
import tkinter.ttk as ttk
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
class TextScrollCombo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.grid_propagate(True)
self.text = tk.Text(self, wrap="none")
self.text.grid(row=0, column=0, sticky="nsew", padx=2, pady=2)
scroll_bar_y = ttk.Scrollbar(self, command=self.text.yview)
scroll_bar_y.grid(row=0, column=1, sticky='nsew')
self.text['yscrollcommand'] = scroll_bar_y.set
scroll_bar_x = ttk.Scrollbar(self, command=self.text.xview, orient=tk.HORIZONTAL)
scroll_bar_x.grid(row=1, column=0, sticky='nsew')
self.text['xscrollcommand'] = scroll_bar_x.set
def add(self, row):
self.text.insert("end", row)
def empty(self):
self.text.delete('1.0', "end")
class program(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.grid()
self.grid_propagate(False)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.menubar = tk.Menu(master)
master.config(menu=self.menubar)
self.menu_create()
self.combo = TextScrollCombo(master)
self.combo.grid(row=0, column=0, sticky='nsew')
style = ttk.Style()
style.theme_use('clam')
self.WD_open ="/" # set root as default directory to open files
self.WD_save ="/" # set root as default directory to save files
def menu_create(self):
self.progs = tk.Menu(self.menubar, tearoff=False)
self.progs.add_command(label="Choose directory to OPEN files", command=self.chooseWD_open)
self.progs.add_command(label="Choose directory to SAVE files", command=self.chooseWD_save)
self.progs.add_command(label="Close", command=self.close)
self.menubar.add_cascade(label="Program", menu=self.progs)
self.menuPs = tk.Menu(self.menubar, tearoff=False)
self.menuPs.add_command(label="Load data", command=self.data_import)
self.menuPs.add_command(label="Select interval", command=self.interval_select)
self.menuPs.add_command(label="Cut & save interval", command=self.interval_cut)
self.menubar.add_cascade(label="HRdata", menu=self.menuPs)
def data_import(self):
self.file_name = filedialog.askopenfilename(initialdir = self.WD_open, title = "Select file",filetypes = (("HRM files","*.hrm"),("Text files","*.txt"),("all files","*.*")))
file = open(self.file_name)
data = file.read()
file.close()
self.combo.add(data) # to display
# load dataframe
self.df = pd.read_csv(self.file_name, sep = ",")
self.df.columns = ["IBI"] # name column "IBI" (Inter-beat interval)
        # delete unnecessary information
a = list(self.df.IBI) # convert column from self.df to list
b = list(self.df.IBI).index('[HRData]') # recognize beginning of HR data in the list
del a[0:4] # deletes first four rows
del a[1:b-3] # deletes rows 2 to "[HRData]"
self.df = pd.DataFrame({'IBI':a}) # writes dataframe
# create column with forth-counted time
self.df['IBI'] = self.df['IBI'].str.replace('StartTime=','') # deletes "StartTime=" to obtain just time value
self.df['IBItime'] = pd.to_timedelta(self.df['IBI'])*1000000 # *1.000.000, because IBI-values are interpreted in microseconds (except for StartTime-value)
l = len(self.df) # calculate length of DF / number of IBIs
        liste = list(range(1,l)) # create list from 1 to end (the unused row with the time has the index number '0' and the last row has length minus 1)
self.df['time'] = pd.to_timedelta(self.df['IBI'][0])
for i in liste:
self.df['time'][i] = self.df['time'][i-1] + self.df['IBItime'][i] # adds continuously the respective time value (previous time value plus length of IBI)
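        # Worked example (assuming the .hrm IBIs are in milliseconds, as the scaling above implies):
        # with StartTime = 10:30:00.0 and IBIs of 800 ms and 820 ms, the 'time' column becomes
        # 10:30:00.800 for the first beat and 10:30:01.620 for the second, i.e. the clock time
        # at which each beat occurred.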
self.combo.empty() # empty screen
self.combo.add(self.df)
# save as .csv-file
filename_w_ext = os.path.basename(self.file_name) # change file name to be saved in a way that
filename, file_extension = os.path.splitext(filename_w_ext) # it has the same name as -hrm file, but
self.n = filename+'.csv' # with .csv as ending
self.df.to_csv(self.n, sep='\t', index=False) # options: tabulator as seperator; without indexing
return(self.df)
def interval_select(self):
def time_select(num1,num2,num3,num4,num5,num6):
stime = (num1+":"+num2+":"+num3) # select start time
dtime = (num4+":"+num5+":"+num6)
print(stime) # print in console
print(dtime)
st = datetime.strptime(stime,"%H:%M:%S")
dt = datetime.strptime(dtime,"%H:%M:%S")
            self.Stime = timedelta(hours = st.hour, minutes = st.minute, seconds= st.second)  # convert into a timedelta that can be used later
self.Dtime = timedelta(hours = dt.hour, minutes = dt.minute, seconds= dt.second)
            return self.Stime, self.Dtime
# create window
intwin = tk.Tk()
intwin.geometry("200x100+100+100")
intwin.title('Interval')
# label
tk.Label(intwin, text="Start time").grid(row=1, column=0)
tk.Label(intwin, text=":").grid(row=1, column=2)
tk.Label(intwin, text=":").grid(row=1, column=4)
tk.Label(intwin, text="Duration").grid(row=2, column=0)
tk.Label(intwin, text=":").grid(row=2, column=2)
tk.Label(intwin, text=":").grid(row=2, column=4)
# set type of variable
number1 = tk.IntVar()
number2 = tk.IntVar()
number3 = tk.IntVar()
number4 = tk.IntVar()
number5 = tk.IntVar()
number6 = tk.IntVar()
# create entry slot
sHH = tk.Entry(intwin, textvariable=number1, width=2) # commentary: it would be nice to limit the entry options to two digits by default - not just visually - , in order to avoid nonsense entries / typos
sHH.grid(row=1, column=1)
sMM = tk.Entry(intwin, textvariable=number2, width=2)
sMM.grid(row=1, column=3)
sSS = tk.Entry(intwin, textvariable=number3, width=2)
sSS.grid(row=1, column=5)
dHH = tk.Entry(intwin, textvariable=number4, width=2)
dHH.grid(row=2, column=1)
dMM = tk.Entry(intwin, textvariable=number5, width=2)
dMM.grid(row=2, column=3)
dSS = tk.Entry(intwin, textvariable=number6, width=2)
dSS.grid(row=2, column=5)
tk.Button(intwin, text = "OK", command=lambda: time_select(sHH.get(),sMM.get(),sSS.get(),dHH.get(),dMM.get(),dSS.get()), activebackground = "white", activeforeground = "blue").place(x = 70, y = 50)
intwin.mainloop()
self.combo.add(self.Stime)
self.combo.add(self.Dtime)
def interval_cut(self):
# load dataframe
self.df2 = pd.read_csv(self.n, sep = "\t")
# define times as timedelta data
self.df2['IBI'] = pd.to_numeric(self.df2['IBI'][1:])
self.df2['IBItime'] = pd.to_timedelta(self.df2['IBItime'])
        self.df2['time'] = pd.to_timedelta(self.df2['time'])
"""Tests for the sdv.constraints.tabular module."""
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
    def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
transformed_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(transformed_data)
# Run
out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
def test___init___strict_true(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
        instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
        expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
        instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
        expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
          of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
        - Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
        - Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': | pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']) | pandas.to_datetime |
"""Data abstractions."""
from abc import abstractmethod
from collections import defaultdict, namedtuple
import copy
import os
import time
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, IterableDataset
TYPE_NORMAL_ATTR = 0
TYPE_INDICATOR = 1
TYPE_FANOUT = 2
def time_this(f):
def timed_wrapper(*args, **kw):
start_time = time.time()
result = f(*args, **kw)
end_time = time.time()
# Time taken = end_time - start_time
print('| func:%r took: %2.4f seconds |' % \
(f.__name__, end_time - start_time))
return result
return timed_wrapper
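# Illustrative usage sketch (not part of the original module; `slow_scan` is a
# hypothetical function used only for this example): any callable can be wrapped
# with @time_this to print its wall-clock time.
#
# @time_this
# def slow_scan(n):
#     return sum(range(n))
#
# slow_scan(10_000_000)   # prints: | func:'slow_scan' took: 0.xxxx seconds |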
# Column factorization.
#
# See estimators::FactorizedProgressiveSampling::update_factor_mask for
# a description of dominant operators.
#
# What each operator projects to.
PROJECT_OPERATORS = {
"<": "<=",
">": ">=",
"!=": "ALL_TRUE",
"<=": "<=",
">=": ">=",
}
# What each operator projects to for the last subvar, if not the same as other
# subvars.
PROJECT_OPERATORS_LAST = {
"<": "<",
">": ">",
"!=": "!=",
}
# What the dominant operator for each operator is.
PROJECT_OPERATORS_DOMINANT = {
"<=": "<",
">=": ">",
"<": "<",
">": ">",
"!=": "!=",
}
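# Illustrative sketch (added for clarity; the helper below is hypothetical and is
# not used by the estimator code): how a predicate's operator is projected when a
# column is factorized into subvars. Higher-order subvars use the relaxed
# operators in PROJECT_OPERATORS, while the last subvar keeps the exact operator.
def _project_op_example(op, is_last_subvar):
    if is_last_subvar:
        return PROJECT_OPERATORS_LAST.get(op, op)
    return PROJECT_OPERATORS.get(op, op)
# e.g. _project_op_example('<', is_last_subvar=False) -> '<='
#      _project_op_example('<', is_last_subvar=True)  -> '<'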
class Column(object):
"""A column. Data is write-once, immutable-after.
Typical usage:
col = Column('myCol').Fill(data).SetDistribution(domain_vals)
"data" and "domain_vals" are NOT copied.
"""
def __init__(self,
name,
distribution_size=None,
pg_name=None,
factor_id=None,
bit_width=None,
bit_offset=None,
domain_bits=None,
num_bits=None):
self.name = name
# Data related fields.
self.data = None
self.all_distinct_values = None
self.distribution_size = distribution_size
# Factorization related fields.
self.factor_id = factor_id
self.bit_width = bit_width
self.bit_offset = bit_offset
self.domain_bits = domain_bits
self.num_bits = num_bits
# pg_name is the name of the corresponding column in the Postgres db.
if pg_name:
self.pg_name = pg_name
else:
self.pg_name = name
def Name(self):
"""Name of this column."""
return self.name
def DistributionSize(self):
"""This column will take on discrete values in [0, N).
Used to dictionary-encode values to this discretized range.
"""
return self.distribution_size
def ProjectValue(self, value):
"""Bit slicing: returns the relevant bits in binary for a sub-var."""
assert self.factor_id is not None, "Only for factorized cols"
return (value >> self.bit_offset) & (2**self.bit_width - 1)
def ProjectOperator(self, op):
assert self.factor_id is not None, "Only for factorized cols"
if self.bit_offset > 0:
# If not found, no need to project.
return PROJECT_OPERATORS.get(op, op)
# Last subvar: identity (should not project).
return op
def ProjectOperatorDominant(self, op):
assert self.factor_id is not None, "Only for factorized cols"
return PROJECT_OPERATORS_DOMINANT.get(op, op)
def BinToVal(self, bin_id):
assert bin_id >= 0 and bin_id < self.distribution_size, bin_id
return self.all_distinct_values[bin_id]
def ValToBin(self, val):
if isinstance(self.all_distinct_values, list):
return self.all_distinct_values.index(val)
inds = np.where(self.all_distinct_values == val)
assert len(inds[0]) > 0, val
return inds[0][0]
def FindProjection(self, val):
if val in self.all_distinct_values:
return (self.ValToBin(val), True)
elif val > self.all_distinct_values[-1]:
return (len(self.all_distinct_values), False)
elif val < self.all_distinct_values[0]:
return (-1, False)
else:
return (next(
i for i, v in enumerate(self.all_distinct_values) if v > val),
False)
def SetDistribution(self, distinct_values):
"""This is all the values this column will ever see."""
assert self.all_distinct_values is None
# pd.isnull returns true for both np.nan and np.datetime64('NaT').
is_nan = pd.isnull(distinct_values)
contains_nan = np.any(is_nan)
dv_no_nan = distinct_values[~is_nan]
        # IMPORTANT: np.sort puts NaT values at the beginning and NaN values
        # at the end. For our purposes we always add any null value to the
        # beginning.
vs = np.sort(np.unique(dv_no_nan))
if contains_nan and np.issubdtype(distinct_values.dtype, np.datetime64):
vs = np.insert(vs, 0, np.datetime64('NaT'))
elif contains_nan:
vs = np.insert(vs, 0, np.nan)
if self.distribution_size is not None:
assert len(vs) == self.distribution_size
self.all_distinct_values = vs
self.distribution_size = len(vs)
return self
def Fill(self, data_instance, infer_dist=False):
assert self.data is None
self.data = data_instance
# If no distribution is currently specified, then infer distinct values
# from data.
if infer_dist:
self.SetDistribution(self.data)
return self
def InsertNullInDomain(self):
# Convention: np.nan would only appear first.
if not pd.isnull(self.all_distinct_values[0]):
if self.all_distinct_values.dtype == np.dtype('object'):
# String columns: inserting nan preserves the dtype.
self.all_distinct_values = np.insert(self.all_distinct_values,
0, np.nan)
else:
# Assumed to be numeric columns. np.nan is treated as a
# float.
self.all_distinct_values = np.insert(
self.all_distinct_values.astype(np.float64, copy=False), 0,
np.nan)
self.distribution_size = len(self.all_distinct_values)
def __repr__(self):
return 'Column({}, distribution_size={})'.format(
self.name, self.distribution_size)
class Table(object):
"""A collection of Columns."""
def __init__(self, name, columns, pg_name=None, validate_cardinality=True):
"""Creates a Table.
Args:
name: Name of this table object.
columns: List of Column instances to populate this table.
pg_name: name of the corresponding table in Postgres.
"""
self.name = name
if validate_cardinality:
self.cardinality = self._validate_cardinality(columns)
else:
# Used as a wrapper, not a real table.
self.cardinality = None
self.columns = columns
# Bin to val funcs useful for sampling. Takes
# (col 1's bin id, ..., col N's bin id)
# and converts it to
# (col 1's val, ..., col N's val).
self.column_bin_to_val_funcs = [c.BinToVal for c in columns]
self.val_to_bin_funcs = [c.ValToBin for c in columns]
self.name_to_index = {c.Name(): i for i, c in enumerate(self.columns)}
if pg_name:
self.pg_name = pg_name
else:
self.pg_name = name
def __repr__(self):
return '{}({})'.format(self.name, self.columns)
def _validate_cardinality(self, columns):
"""Checks that all the columns have same the number of rows."""
cards = [len(c.data) for c in columns]
c = np.unique(cards)
assert len(c) == 1, c
return c[0]
def to_df(self):
return | pd.DataFrame({c.name: c.data for c in self.columns}) | pandas.DataFrame |
"""Build daily-level feature sets, stitching together weather datasets and defining features.
"""
import numpy as np
import pandas as pd
import geopandas as gpd
from dask import dataframe as dd
from loguru import logger
from shapely.ops import nearest_points
from src.data.gfs.utils import grb2gdf
from src.conf import settings
start_year = 2017
end_year = 2019
OUTPUT_DIR = settings.DATA_DIR / "processed/training/"
if __name__ == "__main__":
df = pd.concat(
[
pd.read_parquet(settings.DATA_DIR / f"processed/caiso_hourly/{y}.parquet")
            for y in range(start_year, end_year + 1)
]
)
df.index = df.index.tz_convert("US/Pacific")
# Preprocessed hourly data is in MWh, so we can simply sum up to resample to days
df = df.groupby(pd.Grouper(freq="D")).sum()
df.reset_index(inplace=True)
# By construction, we are interested in Feb to May (inclusive)
season_filter = df["timestamp"].dt.month.isin(range(2, 6))
df = df[season_filter]
    # Flag weekdays: dt.weekday is 5 or 6 on Saturday/Sunday, so negate the mask
    df["is_weekday"] = ~df["timestamp"].dt.weekday.isin([5, 6])
# Integrate forecast data
gfs_data_files = (
settings.DATA_DIR
/ f"interim/gfs/ca/gfs_3_201[7-9][01][2-5]*_0000_{i*3:03}.parquet"
for i in range(5, 10)
)
forecasts = [*(gfs_data_files)]
dayahead_weather = dd.read_parquet(forecasts).compute()
# Add UTC timezone and convert to US/Pacific
dayahead_weather["timestamp"] = (
dayahead_weather["valid_time"].dt.tz_localize("UTC").dt.tz_convert("US/Pacific")
)
dayahead_weather = grb2gdf(dayahead_weather)
# Include powerplant data
counties = gpd.read_file(
settings.DATA_DIR / "processed/geography/CA_Counties/CA_Counties_TIGER2016.shp"
)
weather_point_measurements = dayahead_weather["geometry"].geometry.unary_union
powerplants = pd.read_parquet(
settings.DATA_DIR / f"processed/geography/powerplants.parquet"
)
# Add geometry
powerplants = gpd.GeoDataFrame(
powerplants,
geometry=gpd.points_from_xy(powerplants["longitude"], powerplants["latitude"]),
crs="EPSG:4326",
)
powerplants["geometry"] = (
powerplants["geometry"]
.apply(lambda x: nearest_points(x, weather_point_measurements))
.str.get(1)
)
# In order to integrate powerplant data, we have to merge on the powerplant's closest county location.
powerplants = gpd.tools.sjoin(
powerplants.to_crs("EPSG:4326"),
counties[["GEOID", "geometry"]].to_crs("EPSG:4326"),
op="within",
how="left",
)
powerplants["online_date"] = powerplants["online_date"].dt.tz_localize("US/Pacific")
powerplants["retire_date"] = powerplants["retire_date"].dt.tz_localize("US/Pacific")
# Now group over GEOIDs, and sum up the capacity
# For each month, we have to only associate capacity for powerplants that were online.
weather_orig = dayahead_weather.copy()
capacities = {}
results = []
for date, weather_df in dayahead_weather.groupby(
pd.Grouper(key="timestamp", freq="MS"), as_index=False
):
if weather_df.empty:
logger.warning("Weather data for {date} is empty!", date=date)
continue
logger.debug("Assigning capacity for weather points as of {date}.", date=date)
valid_plants = (powerplants["online_date"] <= date) & (
powerplants["retire_date"].isnull() | (powerplants["retire_date"] > date)
)
valid_plants = powerplants[valid_plants]
county_mw = valid_plants.groupby("GEOID", as_index=False)["capacity_mw"].sum()
weather_df = weather_df.merge(county_mw, on="GEOID", how="left")
weather_df["capacity_mw"] = weather_df["capacity_mw"].fillna(0)
results.append(weather_df)
# Note that this is still on the original df grain as we did not aggregate the groupby!
dayahead_weather = pd.concat(results, ignore_index=True)
# Roll-up to dailies
daily_capacity = (
dayahead_weather.groupby(by=["GEOID", pd.Grouper(key="timestamp", freq="D")])[
"capacity_mw"
]
.mean()
.reset_index()
.groupby(by=pd.Grouper(key="timestamp", freq="D"))["capacity_mw"]
.sum()
)
county_level_dailies = dayahead_weather.groupby(
by=["GEOID", pd.Grouper(key="timestamp", freq="D")], as_index=True
).agg(
t_min=("t", "min"),
t_max=("t", "max"),
t_mean=("t", "mean"),
dswrf_mean=("dswrf", "mean"),
dswrf_max=("dswrf", "max"),
capacity_mw=("capacity_mw", "mean"),
).reset_index()
def weighted_mean_factory(weight_col):
def weighted_avg(s):
if s.empty:
return 0.0
else:
return np.average(s, weights=dayahead_weather.loc[s.index, weight_col])
weighted_avg.__name__ = f"{weight_col}_wmean"
return weighted_avg
# GFS is missing certain days for one reason or another.
    # Furthermore, the pandas Grouper fills in missing timesteps to build a full-frequency datetime index.
# Since we don't have continuity in time, we ignore those.
dayahead_daily = (
county_level_dailies.groupby(by=pd.Grouper(key="timestamp", freq="D"),)
.agg(
t_mean= | pd.NamedAgg(column="t_mean", aggfunc="mean") | pandas.NamedAgg |
import pandas as pd
import numpy as np
# from.tools import *
from Multivariate_Markov_Switching_Model.tools import *
from Multivariate_Markov_Switching_Model.core import *
from Multivariate_Markov_Switching_Model.tools import _2dim
import os
# os.chdir("Multivariate_Markov_Switching_Model")
"""
test A
"""
data = []
with open("Multivariate_Markov_Switching_Model/test_data/MSVARUN.txt") as f:
for _ in f.readlines():
data.append(_.strip().split("\t"))
data = pd.DataFrame(data)
s = data.set_index([1,0]).replace(".",np.nan)[2].astype(float).rename_axis(['year','month'])
apr = data.set_index([1,0]).replace(".",np.nan)[3].astype(float).rename_axis(['year','month'])
apriori = apr[apr.index.get_loc(("1967","7")):apr.index.get_loc(("2004","3"))].values
k_lag = 2
_ = np.log(s).diff(k_lag)*100
s = _[_.index.get_loc(("1967","2"))+k_lag:_.index.get_loc(("2004","2"))+1]
s = ((s-s.mean())/s.std()).values[:,np.newaxis]
y = s
z = generate_lagged_regressors(s,3).values
x = [1]*z.shape[0]
x = _2dim(np.array(x))
model = Markov_Multivarite_Regression(y[-z.shape[0]:],x,z,2,2,"full",apriori=None)
# model = Multivariate_Markov_Switching_Model(y[-z.shape[0]:],x,None,2,2,"full",apriori=None)
res = model.fit()
"""
test B
"""
data = []
with open("Multivariate_Markov_Switching_Model/test_data/MSVARANAS.txt") as f:
for _ in f.readlines():
data.append(_.strip().split("\t"))
data = | pd.DataFrame(data) | pandas.DataFrame |
# Classification
# SVM
# -*- coding: utf-8 -*-
### Load the basic libraries
import pandas as pd
import seaborn as sns
'''
[Step 1] Data preparation / basic setup
'''
# Convert the dataset to a DataFrame using the load_dataset function
df = sns.load_dataset('titanic')
# IPython display setting - increase the maximum number of columns shown
| pd.set_option('display.max_columns', 15) | pandas.set_option |
# This script generates the scoring and schema files
# Creates the schema, and holds the init and run functions needed to
# operationalize the chestXray model
import os, sys, pickle, base64
import keras.models
import keras.layers
import keras_contrib.applications.densenet
import pandas as pd
import numpy as np
import azure_chestxray_utils, azure_chestxray_cam
####################################
# Parameters
####################################
global chest_XRay_model
global as_string_b64encoded_pickled_data_column_name
as_string_b64encoded_pickled_data_column_name = 'encoded_image'
global densenet_weights_file_name
# densenet_weights_file_name = 'weights_only_chestxray_model_14_weights_712split_epoch_029_val_loss_147.7599.hdf5'
densenet_weights_file_name = 'weights_only_chestxray_model_14_weights_712split_epoch_029_val_loss_147.7599 - Copy.hdf5'
# Import data collection library. Only supported for docker mode.
# Functionality will be ignored when package isn't found
try:
from azureml.datacollector import ModelDataCollector
except ImportError:
print("Data collection is currently only supported in docker mode. May be disabled for local mode.")
# Mocking out model data collector functionality
class ModelDataCollector(object):
def nop(*args, **kw): pass
def __getattr__(self, _): return self.nop
def __init__(self, *args, **kw): return None
pass
####################################
# Utils
####################################
def as_string_b64encoded_pickled(input_object):
#b64encode returns bytes class, make it string by calling .decode('utf-8')
return (base64.b64encode(pickle.dumps(input_object))).decode('utf-8')
def unpickled_b64decoded_as_bytes(input_object):
if input_object.startswith('b\''):
input_object = input_object[2:-1]
# make string bytes
input_object = input_object.encode('utf-8')
#decode and the unpickle the bytes to recover original object
return (pickle.loads(base64.b64decode(input_object)))
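# Round-trip sketch (illustrative only; `_payload` and `_restored` are hypothetical
# names): any picklable object, e.g. a cv2/numpy image array, can be shipped to the
# web service as a base64 string and recovered unchanged on the other side.
# _payload = as_string_b64encoded_pickled(np.zeros((224, 224, 3), dtype=np.uint8))
# _restored = unpickled_b64decoded_as_bytes(_payload)   # -> the original ndarray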
def get_image_score_and_serialized_cam(crt_cv2_image, crt_chest_XRay_model):
prj_consts = azure_chestxray_utils.chestxray_consts()
crt_cv2_image = azure_chestxray_utils.normalize_nd_array(crt_cv2_image)
crt_cv2_image = 255*crt_cv2_image
crt_cv2_image=crt_cv2_image.astype('uint8')
predictions, cam_image, predicted_disease_index = \
azure_chestxray_cam.get_score_and_cam_picture(crt_cv2_image, crt_chest_XRay_model)
blended_image = azure_chestxray_cam.process_cam_image(cam_image, crt_cv2_image)
serialized_image = azure_chestxray_cam.plot_cam_results(blended_image, cam_image, crt_cv2_image, \
prj_consts.DISEASE_list[predicted_disease_index])
return predictions, serialized_image
####################################
# API functions
####################################
# Prepare the web service definition by authoring
# init() and run() functions. Test the functions
# before deploying the web service.
def init():
try:
print("init() method: Python version: " + str(sys.version))
print("crt Dir: " + os.getcwd())
import pip
# pip.get_installed_distributions()
myDistr = pip.get_installed_distributions()
type(myDistr)
for crtDist in myDistr:
print(crtDist)
# load the model file
global chest_XRay_model
chest_XRay_model = azure_chestxray_utils.build_DenseNetImageNet201_model()
chest_XRay_model.load_weights(densenet_weights_file_name)
print('Densenet model loaded')
except Exception as e:
print("Exception in init:")
print(str(e))
def run(input_df):
try:
import json
debugCounter = 0
print("run() method: Python version: " + str(sys.version) ); print('Step '+str(debugCounter));debugCounter+=1
print ('\ninput_df shape {}'.format(input_df.shape))
print(list(input_df))
print(input_df)
input_df = input_df[as_string_b64encoded_pickled_data_column_name][0]; print('Step '+str(debugCounter));debugCounter+=1
input_cv2_image = unpickled_b64decoded_as_bytes(input_df); print('Step '+str(debugCounter));debugCounter+=1
#finally scoring
predictions, serialized_cam_image = get_image_score_and_serialized_cam(input_cv2_image, chest_XRay_model)
#predictions = chest_XRay_model.predict(input_cv2_image[None,:,:,:])
# prediction_dc.collect(ADScores)
outDict = {"chestXrayScore": str(predictions), "chestXrayCAM":as_string_b64encoded_pickled(serialized_cam_image)}
return json.dumps(outDict)
except Exception as e:
return(str(e))
####################################
# main function can be used for test and demo
####################################
def main():
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
print('Entered main function:')
print(os.getcwd())
amlWBSharedDir = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY']
print(amlWBSharedDir)
def get_files_in_dir(crt_dir):
return( [f for f in os.listdir(crt_dir) if os.path.isfile(os.path.join(crt_dir, f))])
fully_trained_weights_dir=os.path.join(
amlWBSharedDir,
os.path.join(*(['chestxray', 'output', 'trained_models_weights'])))
crt_models = get_files_in_dir(fully_trained_weights_dir)
print(fully_trained_weights_dir)
print(crt_models)
test_images_dir=os.path.join(
amlWBSharedDir,
os.path.join(*(['chestxray', 'data', 'ChestX-ray8', 'test_images'])))
test_images = get_files_in_dir(test_images_dir)
print(test_images_dir)
print(len(test_images))
# score in local mode (i.e. here in main function)
model = azure_chestxray_utils.build_DenseNetImageNet201_model()
model.load_weights(os.path.join(
fully_trained_weights_dir, densenet_weights_file_name))
    print('Model weights loaded!')
import cv2
cv2_image = cv2.imread(os.path.join(test_images_dir,test_images[0]))
x, serialized_cam_image = get_image_score_and_serialized_cam(cv2_image, model)
file_bytes = np.asarray(bytearray(serialized_cam_image.read()), dtype=np.uint8)
recovered_image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
# x = model.predict(cv2_image[None,:,:,:])
print(test_images[0])
print(x)
print(recovered_image.shape)
# score in local mode (i.e. here in main function) using encoded data
encoded_image = as_string_b64encoded_pickled(cv2_image)
df_for_api = | pd.DataFrame(data=[[encoded_image]], columns=[as_string_b64encoded_pickled_data_column_name]) | pandas.DataFrame |
import json
import logging
import os
import pandas as pd
import wandb
import yaml
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from transformers import RobertaConfig, RobertaTokenizerFast, RobertaForSequenceClassification, Trainer, \
TrainingArguments
from ClassificationDataset import ClassificationDataset
def compute_metrics(pred):
labels = pred.label_ids
preds = (pred.predictions >= 0.5).astype(int) # .argmax(-1)
# print(labels, preds)
# try:
acc = accuracy_score(labels, preds)
# except ValueError:
return {
'accuracy': acc,
'f1': f1_score(y_true=labels, y_pred=preds, average='weighted'),
'precision': precision_score(y_true=labels, y_pred=preds, average='weighted'),
'recall': recall_score(y_true=labels, y_pred=preds, average='weighted')
}
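# Quick sanity-check sketch (illustrative; assumes multi-label targets encoded as
# 0/1 indicator arrays, since predictions are thresholded at 0.5 rather than
# argmax-ed). `_Pred` is a hypothetical stand-in for the HF EvalPrediction object.
#
# import numpy as np
# from collections import namedtuple
# _Pred = namedtuple("_Pred", ["label_ids", "predictions"])
# compute_metrics(_Pred(label_ids=np.array([[1, 0], [0, 1]]),
#                       predictions=np.array([[0.9, 0.2], [0.1, 0.8]])))
# # -> accuracy, f1, precision and recall all equal to 1.0 for this perfect match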
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# load config
logging.info("Loading config...")
config = yaml.safe_load(open("./params.yaml"))['classification_train']
config_train = yaml.safe_load(open("./params.yaml"))['language_modeling_train']
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(item) for item in config['cuda_visible_devices']])
# log to wandb
logging.info("Logging to wandb...")
wandb.login()
# setup datasets paths
dev_ds = "./data/dev/"
test_ds = "./data/test/"
train_ds = "./data/train/"
# set models path
lm_model_path = "./models/roberta_lm"
models_path = "./models/roberta_classifier"
os.makedirs(models_path, exist_ok=True)
# define special characters
logging.info("Defining special characters...")
special_tokens = [
'<url>',
'<email>',
'<number>',
'<date>',
]
logging.info("Loading tokenizer...")
tokenizer = RobertaTokenizerFast.from_pretrained(lm_model_path, max_len=config_train['max_seq_length'],
use_fast=True)
# add special tokens
tokenizer.add_special_tokens({
'additional_special_tokens': special_tokens
})
# load datasets
logging.info("Loading datasets...")
data1 = | pd.read_csv("./data/dev/in.tsv", delimiter='\t', header=None, encoding="utf8", quoting=0) | pandas.read_csv |
################################################################################
### Python port of rlassoEffects.R
### https://github.com/cran/hdm/blob/master/R/rlassoEffects.R
################################################################################
################################################################################
### 1: Load modules
################################################################################
# Standard Python modules
import joblib as jbl
import multiprocess as mp
import numpy as np
import pandas as pd
from scipy import linalg
from scipy.stats import multivariate_normal, norm
from sklearn.linear_model import LinearRegression as lm
# Other parts of hmdpy
from hdmpy.help_functions import cvec
from hdmpy.rlasso import rlasso
################################################################################
### 2: Define functions
################################################################################
################################################################################
### 2.1: Functions which are not in the original R package
### These are generally helper functions to allow an implementation which
### reads as closely to the original R code as possible, and to ease a
### Python implementation, including parallelizing the code
################################################################################
# Define a function which calculates an OLS covariance estimator (homoskedastic
# or heteroskedasticity-robust HC0), based on X and the residuals
def get_cov(X, e, add_intercept=True, homoskedastic=False):
""" Calculates OLS variance estimator based on X and residuals
Inputs
X: n by k matrix, RHS variables
e: n by 1 vector or vector-like, residuals from an OLS regression
add_intercept: Boolean, if True, adds an intercept as the first column of X
                   (and increases k by one)
    homoskedastic: Boolean, if True, returns the homoskedastic variance estimator;
                   otherwise returns the heteroskedasticity-robust HC0 estimator
Outputs
V_hat: k by k NumPy array, estimated covariance matrix
"""
# Get the number of observations n and parameters k
n, k = X.shape
# Check whether an intercept needs to be added
if add_intercept:
# If so, add the intercept
X = np.concatenate([np.ones(shape=(n,1)), X], axis=1)
# Don't forget to increase k
k = k + 1
# Make sure the residuals are a proper column vector
e = cvec(e)
# Calculate X'X
XX = X.T @ X
# Calculate its inverse
XXinv = linalg.inv(XX)
# Check whether to use homoskedastic errors
if homoskedastic:
# If so, calculate the homoskedastic variance estimator
V_hat = (1 / (n-k)) * XXinv * (e.T @ e)
else:
# Otherwise, calculate an intermediate object
S = (e @ np.ones(shape=(1,k))) * X
# Then, get the HC0 sandwich estimator
V_hat = (n / (n-k)) * XXinv @ (S.transpose() @ S) @ XXinv
# Return the result
return V_hat
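# Usage sketch (illustrative; the underscore-prefixed names are hypothetical):
# heteroskedasticity-robust (HC0) standard errors for a small simulated regression.
# _n = 500
# _X = np.random.normal(size=(_n, 2))
# _y = 1.0 + _X @ np.array([[0.5], [-0.25]]) + np.random.normal(size=(_n, 1))
# _fit = lm(fit_intercept=True).fit(_X, _y)
# _e = _y - _fit.predict(_X)
# _V = get_cov(_X, _e, add_intercept=True, homoskedastic=False)
# _se = np.sqrt(np.diag(_V))   # SEs for [intercept, slope 1, slope 2]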
# Define a function which wraps rlassoEffect, so it can easily be parallelized
# within rlassoEffects()
def rlassoEffect_wrapper(i, x, y, d, method='double selection', I3=None,
post=True, colnames_d=None, colnames_x=None,
intercept=True, model=True, homoskedastic=False,
X_dependent_lambda=False, lambda_start=None, c=1.1,
gamma=None, numSim=5000, numIter=15, tol=10**(-5),
threshold=-np.inf, par=True, corecap=np.inf,
fix_seed=True, verbose=False):
""" Wrapper for rlassoEffect()
Inputs
i: Integer, index of the current variable of interest
See the rlassoEffect() documentation for other inputs
Output
res: Dictionary, contains a collection of results from rlassoEffect(), or a
         collection of empty lists and NaNs if an error is encountered while
running rlassoEffect()
"""
if np.amin(x.shape) == 1:
x = cvec(x)
y = cvec(y)
d = cvec(d)
try:
col = rlassoEffect(x, y, d, method=method, I3=I3, post=post,
colnames_d=colnames_d, colnames_x=colnames_x,
intercept=intercept, model=model,
homoskedastic=homoskedastic,
X_dependent_lambda=X_dependent_lambda,
lambda_start=lambda_start, c=c, gamma=gamma,
numSim=numSim, numIter=numIter, tol=tol,
threshold=threshold, par=par, corecap=corecap,
fix_seed=fix_seed)
smat = np.zeros(shape=(x.shape[1]+1, 1)) * np.nan
smat[np.arange(smat.shape[0]) != i] = col['selection_index']
res = {
'coefficients': [i, col['alpha']],
'se': [i, col['se'][0]],
't': [i, col['t'][0]],
'pval': [i, col['pval'][0]],
'lasso_regs': {i: col},
'reside': [i, col['residuals']['epsilon']],
'residv': [i, col['residuals']['v']],
'coef_mat': {i: col['coefficients_reg']},
'selection_matrix': [i, smat]
}
except Exception as e:
# Mimic the results in the original code, where any errors result in a
# variable being skipped, and the preallocated results arrays containing
# either NANs or empty lists
res = {
'coefficients': [i, np.nan],
'se': [i, np.nan],
't': [i, np.nan],
'lasso_regs': {i: e},
'pval': [i, np.nan],
'reside': [i, np.zeros(shape=(x.shape[0], 1)) * np.nan],
'residv': [i, np.zeros(shape=(x.shape[0], 1)) * np.nan],
'coef_mat': {i: []},
'selection_matrix': [i, np.zeros(shape=(x.shape[1]+1, 1)) * np.nan]
}
if verbose:
print('Error encountered in rlassoEffect_wrapper()')
print(e)
print()
return res
# Define a function to simulate quantiles needed for joint confidence intervals
def simul_ci(k=1, Omega=None, var=None, seed=0, fix_seed=True, verbose=False):
if Omega is None:
Omega = np.identity(k)
else:
k = Omega.shape[0]
if var is None:
var = np.diag(Omega)
try:
if fix_seed:
# This is a key difference between the R and Python implementation.
# For some data sets, especially when k > n, scipy.stats.norm() will
# return an error, claiming than Omega is singular. R's
# MASS::mvrnorm(), on the other hand, will happily use Omega and
# calculate draws from it. I had to add allow_singular to get both
# implementations to work similarly.
beta = multivariate_normal(cov=Omega, allow_singular=True).rvs(
random_state=seed
)
else:
beta = multivariate_normal(cov=Omega, allow_singular=True).rvs()
sim = np.amax(np.abs(cvec(beta) / cvec(np.sqrt(var))))
except Exception as e:
if verbose:
print('Error encountered in simul_ci():')
print(e)
print()
sim = np.nan
return sim
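# Illustrative follow-up (the number of draws and the 0.95 level below are
# hypothetical choices, not package defaults): the sup-t draws produced by
# simul_ci() can be turned into a critical value for simultaneous confidence
# intervals over all coefficients.
# _draws = np.array([simul_ci(Omega=np.identity(5), seed=s) for s in range(500)])
# _crit = np.quantile(_draws[~np.isnan(_draws)], 0.95)
# # joint band for coefficient j: estimate_j +/- _crit * se_j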
################################################################################
### 2.2: Functions which are in the original R package
################################################################################
def rlassoEffect(x, y, d, method='double selection', I3=None, post=True,
colnames_d=None, colnames_x=None, intercept=True,
model=True, homoskedastic=False, X_dependent_lambda=False,
lambda_start=None, c=1.1, gamma=None, numSim=5000, numIter=15,
tol=10**(-5), threshold=-np.inf, par=True, corecap=np.inf,
fix_seed=True):
d = cvec(d)
y = cvec(y)
n, kx = x.shape
if colnames_d is None:
colnames_d = ['d1']
if (colnames_x is None) and (x is not None):
colnames_x = ['x' + str(i) for i in np.arange(kx)]
if method == 'double selection':
I1 = rlasso(x, d, post=post, colnames=colnames_x, intercept=intercept,
model=model, homoskedastic=homoskedastic,
X_dependent_lambda=X_dependent_lambda,
lambda_start=lambda_start, c=c, gamma=gamma, numSim=numSim,
numIter=numIter, tol=tol, threshold=threshold, par=par,
corecap=corecap, fix_seed=fix_seed).est['index']
I2 = rlasso(x, y, post=post, colnames=colnames_x, intercept=intercept,
model=model, homoskedastic=homoskedastic,
X_dependent_lambda=X_dependent_lambda,
lambda_start=lambda_start, c=c, gamma=gamma, numSim=numSim,
numIter=numIter, tol=tol, threshold=threshold, par=par,
corecap=corecap, fix_seed=fix_seed).est['index']
# Original code checks if type(I3) is bool, but I believe they only do
# that to see whether it has been defined by the user
if I3 is not None:
I3 = cvec(I3)
I = cvec(I1.astype(bool) | I2.astype(bool) | I3.astype(bool))
else:
I = cvec(I1.astype(bool) | I2.astype(bool))
# missing here: names(I) <- union(names(I1),names(I2))
if I.sum() == 0:
I = None
        if I is None:
            x = d
        else:
            x = np.concatenate([d, x[:, I[:, 0]]], axis=1)
reg1 = lm(fit_intercept=True).fit(x, y)
alpha = reg1.coef_[0,0]
names_alpha = colnames_d
resid = y - cvec(reg1.predict(x))
if I is None:
xi = (resid) * np.sqrt(n/(n - 1))
else:
xi = (resid) * np.sqrt(n/(n - I.sum() - 1))
if I is None:
# Fit an intercept-only model
reg2 = lm(fit_intercept=False).fit(np.ones_like(d), d)
v = d - cvec(reg2.predict(np.ones_like(d)))
else:
reg2 = lm(fit_intercept=True).fit(x[:, 1:], d)
v = d - cvec(reg2.predict(x[:, 1:]))
var = (
(1/n)
* (1/np.mean(v**2, axis=0))
* np.mean((v**2) * (xi**2), axis=0)
* (1/np.mean(v**2, axis=0))
)
se = np.sqrt(var)
tval = alpha / np.sqrt(var)
pval = 2 * norm.cdf(-np.abs(tval))
if I is None:
no_selected = 1
else:
no_selected = 0
res = {'epsilon': xi, 'v': v}
if np.issubdtype(type(colnames_d), np.str_):
colnames_d = [colnames_d]
results = {
'alpha': alpha,
#'se': pd.DataFrame(se, index=colnames_d),
'se': se,
't': tval,
'pval': pval,
'no_selected': no_selected,
'coefficients': alpha,
'coefficient': alpha,
'coefficients_reg': reg1.coef_,
'selection_index': I,
'residuals': res,
#call = match.call(),
'samplesize': n
}
elif method == 'partialling out':
reg1 = rlasso(x, y, post=post, colnames=colnames_x, intercept=intercept,
model=model, homoskedastic=homoskedastic,
X_dependent_lambda=X_dependent_lambda,
lambda_start=lambda_start, c=c, gamma=gamma,
numSim=numSim, numIter=numIter, tol=tol,
threshold=threshold, par=par, corecap=corecap,
fix_seed=fix_seed)
yr = reg1.est['residuals']
reg2 = rlasso(x, d, post=post, colnames=colnames_x, intercept=intercept,
model=model, homoskedastic=homoskedastic,
X_dependent_lambda=X_dependent_lambda,
lambda_start=lambda_start, c=c, gamma=gamma,
numSim=numSim, numIter=numIter, tol=tol,
threshold=threshold, par=par, corecap=corecap,
fix_seed=fix_seed)
dr = reg2.est['residuals']
reg3 = lm(fit_intercept=True).fit(dr, yr)
alpha = reg3.coef_[0,0]
resid = yr - cvec(reg3.predict(dr))
# This is a difference to the original code. The original code uses
# var <- vcov(reg3)[2, 2], which is the homoskedastic covariance
# estimator for OLS. I wrote get_cov() to calculate that, because the
# linear regression implementation in sklearn does not include standard
# error calculations. (I could have switched to statsmodels instead, but
# sklearn seems more likely to be maintained in the future.) I then
# added the option to get_cov() to calculate heteroskedastic standard
# errors. I believe that if the penalty term is adjusted for
# heteroskedasticity, heteroskedastic standard errors should also be
# used here, to be internally consistent.
var = np.array([get_cov(dr, resid, homoskedastic=homoskedastic)[1,1]])
se = np.sqrt(var)
tval = alpha / np.sqrt(var)
pval = 2 * norm.cdf(-np.abs(tval))
res = {'epsilon': resid, 'v': dr}
I1 = reg1.est['index']
I2 = reg2.est['index']
I = cvec(I1.astype(bool) | I2.astype(bool))
#names(I) <- union(names(I1),names(I2))
results = {
'alpha': alpha,
'se': se,
't': tval,
'pval': pval,
'coefficients': alpha,
'coefficient': alpha,
'coefficients_reg': reg1.est['coefficients'],
'selection_index': I,
'residuals': res,
#call = match.call(),
'samplesize': n
}
return results
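# Usage sketch (illustrative, on made-up simulated data; underscore-prefixed names
# are hypothetical): estimate the effect of a single treatment d on y while
# selecting controls from a moderately high-dimensional x via double selection.
# _n, _p = 200, 50
# _x = np.random.normal(size=(_n, _p))
# _d = _x[:, :3].sum(axis=1) + np.random.normal(size=_n)
# _y = 0.5 * _d + _x[:, :3] @ np.array([1.0, -1.0, 0.5]) + np.random.normal(size=_n)
# _res = rlassoEffect(_x, _y, _d, method='double selection')
# _res['alpha'], _res['se'], _res['pval']   # point estimate, std. error, p-value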
################################################################################
### 3: Define classes
################################################################################
class rlassoEffects():
# Initialize index to None to get index=c(1:ncol(x))
def __init__(self, x, y, index=None, method='partialling out', I3=None,
post=True, colnames=None, intercept=True, model=True,
homoskedastic=False, X_dependent_lambda=False,
lambda_start=None, c=1.1, gamma=None, numSim=5000, numIter=15,
tol=10**(-5), threshold=-np.inf, par_outer=True,
par_inner=False, par_any=True, corecap=np.inf, fix_seed=True,
verbose=False):
# Initialize internal variables
if isinstance(x, pd.DataFrame) and colnames is None:
colnames = x.columns
self.x = np.array(x).astype(np.float32)
self.y = cvec(y).astype(np.float32)
if index is None:
self.index = cvec(np.arange(self.x.shape[1]))
else:
self.index = cvec(index)
self.method = method
self.I3 = I3
self.post = post
self.colnames = colnames
if self.index.dtype == bool:
self.k = self.p1 = self.index.sum()
else:
self.k = self.p1 = len(self.index)
        self.n = x.shape[0]
self.intercept = intercept
self.model = model
self.homoskedastic = homoskedastic
self.X_dependent_lambda = X_dependent_lambda
self.lambda_start = lambda_start
self.c = c
self.gamma = gamma
self.numSim = numSim
self.numIter = numIter
self.tol = tol
self.threshold = threshold
self.par_outer = par_outer
self.par_inner = par_inner
self.par_any = par_any
self.corecap = corecap
self.fix_seed = fix_seed
if not self.par_any:
self.par_outer = self.par_inner = False
elif self.par_outer and self.par_inner:
self.par_outer = False
self.verbose = verbose
# Initialize internal variables used in other functions
self.B = None
self.parm = None
self.level = None
self.joint = None
# preprocessing index numerical vector
if np.issubdtype(self.index.dtype, np.number):
            self.index = self.index.astype(int)
if not (np.all(self.index[:,0] < self.x.shape[1])
and (len(self.index) <= self.x.shape[1])):
raise ValueError('Numeric index includes elements which are '
+ 'outside of the column range of x, or the '
+ 'indexing vector is too long')
elif self.index.dtype == bool:
if not (len(self.index) <= self.x.shape[1]):
raise ValueError('Boolean index vector is too long')
self.index = cvec([i for i, b in enumerate(self.index[:,0]) if b])
elif np.issubdtype(self.index.dtype, np.str_):
if not np.all([s in self.x.columns for s in self.index[:,0]]):
raise ValueError('String index specifies column names which '
+ 'are not in the column names of x')
self.index = (
cvec([i for i, s in enumerate(self.index[:,0])
if s in self.x.columns])
)
else:
raise ValueError('Argument index has an invalid type')
if (self.method == 'double selection') and (self.I3 is not None):
I3ind = cvec([i for i, b in enumerate(self.I3) if b])
            if len(I3ind) > 0:
if len([x for x in I3ind[:,0] if x in self.index[:,0]]) > 0:
raise ValueError('I3 and index must not overlap!')
if self.colnames is None:
self.colnames = ['V' + str(i+1) for i in range(self.x.shape[1])]
# Check whether to use parallel processing
if self.par_outer:
# If so, get the number of cores to use
            cores = int(np.amin([mp.cpu_count(), self.corecap]))
else:
# Otherwise, use only one core (i.e. run sequentially)
cores = 1
if (self.I3 is not None):
res = jbl.Parallel(n_jobs=cores)(
jbl.delayed(rlassoEffect_wrapper)(
i, x=np.delete(self.x, i, axis=1), y=self.y, d=self.x[:, i],
method=self.method, I3=np.delete(self.I3, i, axis=0),
post=self.post, colnames_d=self.colnames[i],
colnames_x=[c for j, c in enumerate(self.colnames) if j!=i],
intercept=self.intercept, model=self.model,
homoskedastic=self.homoskedastic,
X_dependent_lambda=self.X_dependent_lambda,
lambda_start=self.lambda_start, c=self.c, gamma=self.gamma,
numSim=self.numSim, numIter=self.numIter, tol=self.tol,
threshold=self.threshold, par=self.par_inner,
corecap=self.corecap, fix_seed=self.fix_seed,
verbose=self.verbose
)
for i in self.index[:,0]
)
else:
res = jbl.Parallel(n_jobs=cores)(
jbl.delayed(rlassoEffect_wrapper)(
i, x=np.delete(self.x, i, axis=1), y=self.y, d=self.x[:, i],
method=self.method, I3=self.I3,
post=self.post, colnames_d=self.colnames[i],
colnames_x=[c for j, c in enumerate(self.colnames) if j!=i],
intercept=self.intercept, model=self.model,
homoskedastic=self.homoskedastic,
X_dependent_lambda=self.X_dependent_lambda,
lambda_start=self.lambda_start, c=self.c, gamma=self.gamma,
numSim=self.numSim, numIter=self.numIter, tol=self.tol,
threshold=self.threshold, par=self.par_inner,
corecap=self.corecap, fix_seed=self.fix_seed,
verbose=self.verbose
)
for i in self.index[:,0]
)
# Convert collection of parallel results into usable results sorted by
# their index
coefficients = np.array([r['coefficients'] for r in res])
coefficients = cvec(coefficients[coefficients[:,0].argsort(), 1])
se = np.array([r['se'] for r in res])
se = cvec(se[se[:,0].argsort(), 1])
t = np.array([r['t'] for r in res])
t = cvec(t[t[:,0].argsort(), 1])
pval = np.array([r['pval'] for r in res])
pval = cvec(pval[pval[:,0].argsort(), 1])
lasso_regs = {}
[lasso_regs.update(r['lasso_regs']) for r in res]
reside = (
np.array([np.concatenate([cvec(r['reside'][0]),
r['reside'][1]],
axis=0)[:,0]
for r in res])
)
reside = reside[reside[:,0].argsort(), 1:].T
residv = (
np.array([np.concatenate([cvec(r['residv'][0]),
r['residv'][1]],
axis=0)[:,0]
for r in res])
)
residv = residv[residv[:,0].argsort(), 1:].T
coef_mat = {}
[coef_mat.update(r['coef_mat']) for r in res]
# Replaced this with the following two steps, to ensure this always
# results in a two dimensional array
#selection_matrix = (
# np.array([np.concatenate([cvec(r['selection_matrix'][0]),
# r['selection_matrix'][1]],
# axis=0)[:,0]
# for r in res])
#)
selection_matrix = [
np.concatenate([cvec(r['selection_matrix'][0]),
r['selection_matrix'][1]],
axis=0).T
for r in res
]
selection_matrix = (
np.concatenate(selection_matrix, axis=0)
)
selection_matrix = (
selection_matrix[selection_matrix[:,0].argsort(), 1:]
)
# Added this, to be able to add names to results objects
idx = [self.colnames[i] for i in self.index[:,0]]
residuals = {
'e': pd.DataFrame(reside, columns=idx),
'v': pd.DataFrame(residv, columns=idx)
}
self.res = {
'coefficients': pd.DataFrame(coefficients, index=idx),
'se': | pd.DataFrame(se, index=idx) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module to parse/process/visualize/export GTFS feed.
@author: ikespand
"""
import pandas as pd
import zipfile
# from keplergl_cli.keplergl_cli import Visualize
from rasta.rasta_kepler import RastaKepler
from shapely.geometry import Point, LineString
import geopandas as gpd
# %%
class Gtfs:
"""GTFS feed parser and visualizer."""
def __init__(self, zip_file):
self.fname = zip_file
self.zf = zipfile.ZipFile(self.fname)
self.check_required_files()
self.read_required_files()
def check_required_files(self):
"""GTFS protocol has certain files which are compulsory and this
function checks for the same.
"""
required_files = [
"agency.txt",
"stops.txt",
"routes.txt",
"trips.txt",
"stop_times.txt",
]
if set(required_files).issubset(self.zf.namelist()):
print("All required files for GTFS are available")
else:
print("Error. Required files are not available. Exiting..")
def read_required_files(self):
"""Function to read FEW of the required files of GTFS"""
self.stops = pd.read_csv(self.zf.open("stops.txt"))
self.routes = pd.read_csv(self.zf.open("routes.txt"))
self.trips = pd.read_csv(self.zf.open("trips.txt"))
def visualize_route(self, MAPBOX_API_KEY, output_map=None):
"""Function to enable a visualization"""
if output_map is None:
self.output_map = "Unknown_"
else:
self.output_map = output_map
if not ("shapes.txt" in self.zf.namelist()):
print("Shape file is not present!")
# self.visualize_stops(MAPBOX_API_KEY)
Gtfs.visualizer(
{"stops": self.stops}, MAPBOX_API_KEY, self.output_map
)
else:
print("Shape file is present, processing...")
self.process_shapes(MAPBOX_API_KEY)
@staticmethod
def visualizer(data, MAPBOX_API_KEY, output_map):
"""Static function which simply usage keplergl_cli to add the data
to the map.
"""
vis = RastaKepler(api_key=MAPBOX_API_KEY, output_map=output_map)
for key in data:
vis.add_data(data=data[key], names=key)
html_path = vis.render(open_browser=False, read_only=False)
return html_path
def process_shapes(self, MAPBOX_API_KEY):
"""Here, we find the shape_id from the shapes.txt and then with that
filter out the route_id and create a geopandas df for each route.
"""
self.shapes = pd.read_csv(self.zf.open("shapes.txt"))
if (self.shapes).empty:
print("Shape file is present, however empty!")
            Gtfs.visualizer(
                {"stops": self.stops}, MAPBOX_API_KEY, self.output_map
            )
else:
shape_ids = self.shapes["shape_id"].unique()
geo_df = []
for shape_id in shape_ids:
# Filter out the shape for given `shape_id`
shape = self.shapes[self.shapes["shape_id"] == shape_id]
# Find the trip by matching `shape_idz
trip = self.trips[self.trips["shape_id"] == shape_id]
# Find the `route` from the given route_id from the `trip`
route = self.routes[
self.routes["route_id"] == trip["route_id"].unique()[0]
]
route = route.reset_index(drop=True)
# zip the coordinates into shapely points and convert to gdf
geometry = [
Point(xy)
for xy in zip(shape.shape_pt_lon, shape.shape_pt_lat)
]
__df = pd.DataFrame()
__df.loc[0, "shape_id"] = shape_id
# Concat the route info to the shape for tooltip in map
__df1 = pd.concat([__df, route], axis=1)
# Append for each route
geo_df.append(
gpd.GeoDataFrame(__df1, geometry=[LineString(geometry)])
)
self.shape_route = | pd.concat(geo_df) | pandas.concat |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.api import Int64Index
class TestDataFrameTruncate:
def test_truncate(self, datetime_frame, frame_or_series):
ts = datetime_frame[::3]
if frame_or_series is Series:
ts = ts.iloc[:, 0]
start, end = datetime_frame.index[3], datetime_frame.index[6]
start_missing = datetime_frame.index[2]
end_missing = datetime_frame.index[7]
# neither specified
truncated = ts.truncate()
tm.assert_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
tm.assert_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
tm.assert_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
tm.assert_equal(truncated, expected)
# corner case, empty series/frame returned
truncated = ts.truncate(after=ts.index[0] - ts.index.freq)
assert len(truncated) == 0
truncated = ts.truncate(before=ts.index[-1] + ts.index.freq)
assert len(truncated) == 0
msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
with pytest.raises(ValueError, match=msg):
ts.truncate(
before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq
)
def test_truncate_copy(self, datetime_frame):
index = datetime_frame.index
truncated = datetime_frame.truncate(index[5], index[10])
truncated.values[:] = 5.0
assert not (datetime_frame.values[5:11] == 5).any()
def test_truncate_nonsortedindex(self, frame_or_series):
# GH#17935
obj = DataFrame({"A": ["a", "b", "c", "d", "e"]}, index=[5, 3, 2, 9, 0])
if frame_or_series is Series:
obj = obj["A"]
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
obj.truncate(before=3, after=9)
def test_sort_values_nonsortedindex(self):
# TODO: belongs elsewhere?
rng = date_range("2011-01-01", "2012-01-01", freq="W")
ts = DataFrame(
{"A": np.random.randn(len(rng)), "B": np.random.randn(len(rng))}, index=rng
)
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
ts.sort_values("A", ascending=False).truncate(
before="2011-11", after="2011-12"
)
def test_truncate_nonsortedindex_axis1(self):
# GH#17935
df = DataFrame(
{
3: np.random.randn(5),
20: np.random.randn(5),
2: np.random.randn(5),
0: np.random.randn(5),
},
columns=[3, 20, 2, 0],
)
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
df.truncate(before=2, after=20, axis=1)
@pytest.mark.parametrize(
"before, after, indices",
[(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])],
)
@pytest.mark.parametrize("klass", [Int64Index, DatetimeIndex])
def test_truncate_decreasing_index(
self, before, after, indices, klass, frame_or_series
):
# https://github.com/pandas-dev/pandas/issues/33756
idx = klass([3, 2, 1, 0])
if klass is DatetimeIndex:
before = pd.Timestamp(before) if before is not None else None
after = pd.Timestamp(after) if after is not None else None
indices = [pd.Timestamp(i) for i in indices]
values = frame_or_series(range(len(idx)), index=idx)
result = values.truncate(before=before, after=after)
expected = values.loc[indices]
| tm.assert_equal(result, expected) | pandas._testing.assert_equal |
import os
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from collections import Counter
from collections import OrderedDict
import copy
from sys import argv
import json
import pandas as pd
import argparse
from tqdm.auto import tqdm
from IPython.core.debugger import Pdb
split_files = { "nqueens_11_6_test.pkl": (8332,"nqueens_11_6_test_unique.pkl","nqueens_11_6_test_amb.pkl"),
"nqueens_11_6_val.pkl": (8335,"nqueens_11_6_val_unique.pkl","nqueens_11_6_val_amb.pkl"),
"futo_6_18_5_test.pkl": (7505,"futo_6_18_5_test_unique.pkl","futo_6_18_5_test_amb.pkl"),
"futo_6_18_5_val.pkl": (7544,"futo_6_18_5_val_unique.pkl","futo_6_18_5_val_amb.pkl")
}
def exclude_rows(table,colname,values):
return table[~table[colname].isin(set(values))]
def read_tables(exp_dir,dir_list):
gdf = None
for this_dir in dir_list:
df = main(os.path.join(exp_dir, this_dir))
df = to_numeric(df, df.columns)
if gdf is None:
gdf = df
else:
gdf = gdf.append(df)
nan_columns = gdf.columns[gdf.isnull().values.sum(axis=0) > 0]
print("filling nans in following column with 0:", nan_columns)
gdf = gdf.fillna(value= 0)
gdf = exclude_rows(gdf,'wds',{'four-one','four.one'})
return gdf
def row_splitter(row):
if row["test_file"] in split_files:
unq, unq_file, amb_file = split_files[row["test_file"]]
row_unq = copy.deepcopy(row)
row_unq["test_file"] = unq_file
row_unq["test_acc"] = 1 - float(row["1_sol_error"])/unq
row_amb = copy.deepcopy(row)
row_amb["test_file"] = amb_file
row_amb["test_acc"] = (row["test_acc"]*10000 - (unq-float(row["1_sol_error"])))/(10000-unq)
return [row,row_unq,row_amb]
else:
return [row]
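# Worked example (illustrative numbers; the hard-coded constants above imply a
# fixed split size of 10000 puzzles): for "nqueens_11_6_test.pkl", 8332 puzzles
# have a unique solution. If row["test_acc"] == 0.90 (i.e. 9000 puzzles solved
# overall) and row["1_sol_error"] == 100, the unique-only accuracy becomes
# 1 - 100/8332 ~= 0.988 and the ambiguous-only accuracy becomes
# (9000 - (8332 - 100)) / (10000 - 8332) = 768/1668 ~= 0.460.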
def load_errors(error_dir):
errors = []
error_files = [os.path.join(error_dir, x) for x in os.listdir(
error_dir) if x.find("errors") > -1]
for f in error_files:
errors.extend(pickle.load(open(f, 'rb')))
retval = OrderedDict(Counter(errors))
retval = OrderedDict([(str(k)+"_sol_errors", v)
for k, v in retval.items()])
return retval
def read_summary_table(exp_dir):
if not os.path.exists(os.path.join(exp_dir, "summary.json")):
print(exp_dir, "missing summary.json file!")
return None
with open(os.path.join(exp_dir, "summary.json")) as f:
l = f.readlines()
l = [json.loads(x) for x in l]
res = pd.DataFrame(l)
res.loc[res['epoch'] == -1, 'epoch'] = None
res.loc[:, ['epoch']] = res.loc[:, ['epoch']].ffill()
return res
def is_safe_sudoku(x,query,n):
mask = (query>0)
if not torch.equal(x[mask],query[mask]):
return False
grid = x.detach().cpu().numpy().astype(int)
grid = grid.reshape(n,n)
b_size = int(np.sqrt(n))
for i in range(n):
if len(set(grid[i]))<n:
return False
if len(set(grid[:,i]))<n:
return False
b_row = i//b_size
b_col = i%b_size
if len(set(grid[b_size*b_row:b_size*(b_row+1),b_size*b_col:b_size*(b_col+1)].flatten()))<n:
return False
return True
def pointwise_accuracy_stats(query, target_set, count, raw_pred, prefix=''):
pred = raw_pred.argmax(dim=1).int()
#Pdb().set_trace()
non_zero_ind = (query > 0)
copy_accuracy = (query[non_zero_ind] ==
pred[non_zero_ind]).sum().float()/non_zero_ind.sum()
copy_point_total = non_zero_ind.sum().item()
unique_point_ind = (target_set[:, 0, :] == target_set[:, 1, :])
unique_point_ind = unique_point_ind*(~non_zero_ind)
unique_point_accuracy = (target_set[:, 0, :][unique_point_ind]
== pred[unique_point_ind]).sum().float()/unique_point_ind.sum()
unique_point_total = unique_point_ind.sum().item()
ambiguous_point_ind = ~(target_set[:, 0, :] == target_set[:, 1, :])
ambiguous_point_accuracy = (
target_set[:, 0, :][ambiguous_point_ind] == pred[ambiguous_point_ind]).sum()
ambiguous_point_accuracy += (target_set[:, 1, :]
[ambiguous_point_ind] == pred[ambiguous_point_ind]).sum()
ambiguous_point_accuracy = ambiguous_point_accuracy.float() / \
ambiguous_point_ind.sum()
ambiguous_point_total = ambiguous_point_ind.sum().item()
total_points = query.numel()
#Pdb().set_trace()
strict_acc_count = float((target_set[:, 0, :] == pred).all(dim=1).sum(
) + (((target_set[:, 1, :] == pred).all(dim=1).int())*(count-1)).sum())
strict_accuracy = strict_acc_count / float(pred.size(0))
lac = ((target_set[:,0,:] == pred) | (target_set[:,1,:] == pred)).all(dim=1).sum().float().item()
la = lac / float(pred.size(0))
lousy_accuracy = (target_set[:,0,:]==pred).int()+(target_set[:,1,:]==pred).int()*(count-1).unsqueeze(1).expand_as(target_set[:,0,:])
lousy_accuracy = ((lousy_accuracy>0).sum(dim=1)==81)
lousy_acc_count = lousy_accuracy.sum().float()
lousy_accuracy = (lousy_acc_count/pred.shape[0]).item()
corrected_accuracy = []
for i,x in enumerate(pred):
corrected_accuracy.append(is_safe_sudoku(x,query[i],9))
corrected_accuracy = torch.tensor(corrected_accuracy).float().mean()
if lac != lousy_acc_count:
Pdb().set_trace()
rv = OrderedDict()
rv[prefix+'copy_acc'] = copy_accuracy.item()
rv[prefix+'unique_pt_acc'] = unique_point_accuracy.item()
rv[prefix+'amb_pt_acc'] = ambiguous_point_accuracy.item()
rv[prefix+'lousy_acc'] = lousy_accuracy
rv[prefix+'strict_acc'] = strict_accuracy
rv[prefix+'total_pts'] = total_points
rv[prefix+'copy_pts'] = copy_point_total
rv[prefix+'unique_pts'] = unique_point_total
rv[prefix+'amb_pts'] = ambiguous_point_total
rv[prefix+'strict_count'] = strict_acc_count
rv[prefix+'lousy_count'] = lac
rv[prefix+'corrected_acc'] = corrected_accuracy.item()
return rv
# return copy_accuracy.item(), unique_point_accuracy.item(), ambiguous_point_accuracy.item(), lousy_accuracy.item(), total_points, copy_point_total, unique_point_total, ambiguous_point_total, lousy_acc_count
def extract_data_from_dump(exp_dir, analyze=False):
if not analyze:
return []
if not os.path.exists(os.path.join(exp_dir, "pred_dump.pkl")):
print(exp_dir, "pred dump doesn't exist")
return []
with open(os.path.join(exp_dir, "pred_dump.pkl"), "rb") as f:
dump = pickle.load(f)
query = []
target_set = []
count = []
raw_pred = []
for z in dump:
q, t, c, r = z["feed_dict"]["query"], z["feed_dict"]["target_set"], z["feed_dict"]["count"], z["output_dict"]["pred"]
query.append(q)
target_set.append(t)
count.append(c)
raw_pred.append(r)
count_arr = torch.cat(count).int()
unique_ind = count_arr == 1
amb_ind = count_arr > 1
all_stats = pointwise_accuracy_stats(torch.cat(query).int(), torch.cat(
target_set).int(), torch.cat(count).int(), torch.cat(raw_pred).float(), 'ALL ')
unique_stats = pointwise_accuracy_stats(torch.cat(query).int()[unique_ind], torch.cat(
target_set).int()[unique_ind], count_arr[unique_ind], torch.cat(raw_pred).float()[unique_ind], 'UN ')
amb_stats = pointwise_accuracy_stats(torch.cat(query).int()[amb_ind], torch.cat(
target_set).int()[amb_ind], count_arr[amb_ind], torch.cat(raw_pred).float()[amb_ind], 'AMB ')
return [all_stats, unique_stats, amb_stats]
def read_summary_file(exp_dir, analyze=False, acc_type= "corrected accuracy"):
if not os.path.exists(os.path.join(exp_dir, "summary.json")):
print(exp_dir, "missing summary.json file!")
return []
with open(os.path.join(exp_dir, "summary.json")) as f:
l = f.readlines()
l = [json.loads(x) for x in l[::-1]]
test_files = set()
test_stats = []
best_train = 0
last_train = -1
best_train_epoch = 0
best_dev = 0
last_dev = -1
best_dev_epoch = 0
current_epoch = 0
total_epochs = 0
accuracy_type = acc_type
for stat in l:
if (stat["mode"]) != "test":
current_epoch = stat['epoch']
if total_epochs == 0:
total_epochs = current_epoch
if last_train < 0:
last_train = stat[accuracy_type]
if best_train <= stat[accuracy_type]:
best_train_epoch = stat['epoch']
best_train = stat[accuracy_type]
continue
if (stat["data_file"].find("dev")) > 0:
if last_dev < 0:
last_dev = stat[accuracy_type]
if (best_dev <= stat[accuracy_type]) and (stat['lr'] != 0):
best_dev_epoch = current_epoch
best_dev = stat[accuracy_type]
tf = os.path.basename(stat["data_file"])
if tf in test_files:
continue
test_files.add(tf)
test_stats.append(OrderedDict(
{"test_file": tf, "test_acc": stat['corrected accuracy'] if stat['data_file'].find('test') > 0 else stat[accuracy_type]}))
if "error distribution" in stat:
test_stats[-1].update({(str(k+1)+"_sol_error", v)
for k, v in enumerate(stat["error distribution"].split("-"))})
if len(test_stats) == 0:
print(exp_dir, "missing test results in summary.json file!")
#copy_accuracy, unique_point_accuracy, ambiguous_point_accuracy, lousy_accuracy = extract_data_from_dump(
# exp_dir, analyze)
#Pdb().set_trace()
addn_stats = extract_data_from_dump(exp_dir, analyze)
split_test_stats = []
for row in test_stats:
row["best_train"] = best_train
row["last_train"] = last_train
row["best_train_epoch"] = best_train_epoch
row["best_dev"] = best_dev
row["last_dev"] = last_dev
row["best_dev_epoch"] = best_dev_epoch
row['total_epochs'] = total_epochs
for this_dict in addn_stats:
for k in this_dict:
row[k] = this_dict[k]
split_test_stats.extend(row_splitter(row))
#row['copy_acc'] = copy_accuracy
#row['unique_point_acc'] = unique_point_accuracy
#row['ambiguous_point_acc'] = ambiguous_point_accuracy
#row['lousy_acc'] = lousy_accuracy
return split_test_stats
def get_params(exp_dir):
all_params = exp_dir.split('_')
all_params = dict([(x.split('-')[0], '.'.join(x.split('-')[1:]))
for x in all_params])
return all_params
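# Worked example (hypothetical directory name): for exp_dir = "lr-0-001_bs-32_wds-four-one",
# the name is split on "_" and then on "-", yielding
# {"lr": "0.001", "bs": "32", "wds": "four.one"} -- which is why exclude_rows()
# above filters both the "four-one" and "four.one" spellings of that setting.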
def collate(parent_dir, analyze=False,acc_type='corrected accuracy'):
exp_dirs = os.listdir(parent_dir)
df = []
for exp_dir in tqdm(exp_dirs):
dir_name = os.path.join(parent_dir, exp_dir)
for expt_dict in read_summary_file(dir_name, analyze,acc_type):
expt_dict["config"] = exp_dir
expt_dict.update(get_params(exp_dir))
df.append(expt_dict)
return | pd.DataFrame(df) | pandas.DataFrame |
import pandas as pd
from functools import reduce
def load():
print("Cargando datos")
datos ={}
"""
Security and coexistence
"""
datos['Convivencia'] = data_convivencia = pd.read_excel('./data/datos separados.xlsx', 'Indicadores de convivencia decr')
datos['Seguridad'] = data_seguridad = pd.read_excel('./data/datos separados.xlsx', 'Indicadores de seguridad')
"""
Traffic
"""
datos['Metrolinea'] = data_metrolinea = pd.read_excel('./data/datos separados.xlsx', 'Metrolínea')
datos['ComparendosMetrolinea'] = data_comparendos_metrolinea = pd.read_excel('./data/datos separados.xlsx', 'Comparendos Metrolinea')
datos['AccidentesTransito'] = data_accidentes_transito = pd.read_excel('./data/datos separados.xlsx', 'Accidentes Transito')
"""
Water
"""
datos['ReinstalacionAgua'] = data_reinstalacion_agua = pd.read_excel('./data/datos separados.xlsx', 'Reinstalación reconexión AGUA')
datos['Fuentes'] = data_fuentes = pd.read_excel('./data/datos separados.xlsx', 'Fuentes de agua')
datos['Produccion'] = data_produccion = | pd.read_excel('./data/datos separados.xlsx', 'Producción de agua') | pandas.read_excel |
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
'a': [1, 2, 3, 4, np.nan],
'b': [np.nan, 4, 3, 2, 1],
'c': [1, 2, np.nan, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
group_by = np.array(['g1', 'g1', 'g2'])
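# Columns 'a' and 'b' form group 'g1' and column 'c' forms group 'g2'; the
# group_by tests below compare vectorbt's grouped reductions against the same
# quantities computed per group with plain pandas.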
@njit
def i_or_col_pow_nb(i_or_col, x, pow):
return np.power(x, pow)
@njit
def pow_nb(x, pow):
return np.power(x, pow)
@njit
def nanmean_nb(x):
return np.nanmean(x)
@njit
def i_col_nanmean_nb(i, col, x):
return np.nanmean(x)
@njit
def i_nanmean_nb(i, x):
return np.nanmean(x)
@njit
def col_nanmean_nb(col, x):
return np.nanmean(x)
# ############# accessors.py ############# #
class TestAccessors:
def test_shuffle(self):
pd.testing.assert_series_equal(
df['a'].vbt.shuffle(seed=seed),
pd.Series(
np.array([2.0, np.nan, 3.0, 1.0, 4.0]),
index=df['a'].index,
name=df['a'].name
)
)
np.testing.assert_array_equal(
df['a'].vbt.shuffle(seed=seed).values,
nb.shuffle_1d_nb(df['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
df.vbt.shuffle(seed=seed),
pd.DataFrame(
np.array([
[2., 2., 2.],
[np.nan, 4., 1.],
[3., 3., 2.],
[1., np.nan, 1.],
[4., 1., np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_value",
[-1, 0., np.nan],
)
def test_fillna(self, test_value):
pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))
pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))
np.testing.assert_array_equal(
df['a'].vbt.bshift(test_n).values,
nb.bshift_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))
np.testing.assert_array_equal(
df['a'].vbt.fshift(test_n).values,
nb.fshift_1d_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))
def test_diff(self):
pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())
np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())
def test_pct_change(self):
pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))
np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))
def test_ffill(self):
pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())
pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())
def test_product(self):
assert df['a'].vbt.product() == df['a'].product()
np.testing.assert_array_equal(df.vbt.product(), df.product())
def test_cumsum(self):
pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())
pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())
def test_cumprod(self):
pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())
pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_min(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_min(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window),
df.rolling(test_window).min()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_max(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_max(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window),
df.rolling(test_window).max()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_mean(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_mean(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window),
df.rolling(test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))
)
def test_rolling_std(self, test_window, test_minp, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window),
df.rolling(test_window).std()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust",
list(product([1, 2, 3, 4, 5], [1, None], [False, True]))
)
def test_ewm_mean(self, test_window, test_minp, test_adjust):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window),
df.ewm(span=test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))
)
def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window),
df.ewm(span=test_window).std()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_min(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_min(minp=test_minp),
df['a'].expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(minp=test_minp),
df.expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(),
df.expanding().min()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_max(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_max(minp=test_minp),
df['a'].expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(minp=test_minp),
df.expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(),
df.expanding().max()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_mean(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_mean(minp=test_minp),
df['a'].expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(minp=test_minp),
df.expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(),
df.expanding().mean()
)
@pytest.mark.parametrize(
"test_minp,test_ddof",
list(product([1, 3], [0, 1]))
)
def test_expanding_std(self, test_minp, test_ddof):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df['a'].expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df.expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(),
df.expanding().std()
)
def test_apply_along_axis(self):
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),
df.apply(pow_nb, args=(2,), axis=0, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),
df.apply(pow_nb, args=(2,), axis=1, raw=True)
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_apply(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb),
df.rolling(test_window).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[2.75, 2.75, 2.75],
[np.nan, np.nan, np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_apply(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df['a'].expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df.expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb),
df.expanding().apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[2.0, 2.0, 2.0],
[2.2857142857142856, 2.2857142857142856, 2.2857142857142856],
[2.4, 2.4, 2.4],
[2.1666666666666665, 2.1666666666666665, 2.1666666666666665]
]),
index=df.index,
columns=df.columns
)
)
def test_groupby_apply(self):
pd.testing.assert_series_equal(
df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({
'a': lambda x: nanmean_nb(x.values),
'b': lambda x: nanmean_nb(x.values),
'c': lambda x: nanmean_nb(x.values)
}), # any clean way to do column-wise grouping in pandas?
)
def test_groupby_apply_on_matrix(self):
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2., 2., 2.],
[2.8, 2.8, 2.8],
[1., 1., 1.]
]),
index=pd.Int64Index([1, 2, 3], dtype='int64'),
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_freq",
['1h', '3d', '1w'],
)
def test_resample_apply(self, test_freq):
pd.testing.assert_series_equal(
df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),
df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply(test_freq, i_col_nanmean_nb),
df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2.28571429, 2.28571429, 2.28571429],
[2., 2., 2.]
]),
index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),
columns=df.columns
)
)
def test_applymap(self):
@njit
def mult_nb(i, col, x):
return x * 2
pd.testing.assert_series_equal(
df['a'].vbt.applymap(mult_nb),
df['a'].map(lambda x: x * 2)
)
pd.testing.assert_frame_equal(
df.vbt.applymap(mult_nb),
df.applymap(lambda x: x * 2)
)
def test_filter(self):
@njit
def greater_nb(i, col, x):
return x > 2
pd.testing.assert_series_equal(
df['a'].vbt.filter(greater_nb),
df['a'].map(lambda x: x if x > 2 else np.nan)
)
pd.testing.assert_frame_equal(
df.vbt.filter(greater_nb),
df.applymap(lambda x: x if x > 2 else np.nan)
)
def test_apply_and_reduce(self):
@njit
def every_nth_nb(col, a, n):
return a[::n]
@njit
def sum_nb(col, a, b):
return np.nansum(a) + b
assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \
df['a'].iloc[::2].sum() + 3
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),
df.iloc[::2].sum().rename('apply_and_reduce') + 3
)
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(
every_nth_nb, sum_nb, apply_args=(2,),
reduce_args=(3,), wrap_kwargs=dict(time_units=True)),
(df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt
)
def test_reduce(self):
@njit
def sum_nb(col, a):
return np.nansum(a)
assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb),
df.sum().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),
df.sum().rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, group_by=group_by),
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')
)
@njit
def argmax_nb(col, a):
a = a.copy()
a[np.isnan(a)] = -np.inf
return np.argmax(a)
assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True),
df.idxmax().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),
pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']).rename('reduce')
)
@njit
def min_and_max_nb(col, a):
out = np.empty(2)
out[0] = np.nanmin(a)
out[1] = np.nanmax(a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([np.nanmin(df['a']), np.nanmax(df['a'])], index=['min', 'max'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
df.apply(lambda x: pd.Series(np.asarray([np.nanmin(x), np.nanmax(x)]), index=['min', 'max']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True, group_by=group_by,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame([[1.0, 1.0], [4.0, 2.0]], index=['min', 'max'], columns=['g1', 'g2'])
)
@njit
def argmin_and_argmax_nb(col, a):
# nanargmin and nanargmax
out = np.empty(2)
_a = a.copy()
_a[np.isnan(_a)] = np.inf
out[0] = np.argmin(_a)
_a = a.copy()
_a[np.isnan(_a)] = -np.inf
out[1] = np.argmax(_a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.Series([df['a'].idxmin(), df['a'].idxmax()], index=['idxmin', 'idxmax'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
df.apply(lambda x: pd.Series(np.asarray([x.idxmin(), x.idxmax()]), index=['idxmin', 'idxmax']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='C', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-02', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='F', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-04', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
def test_squeeze_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.squeeze_grouped(i_col_nanmean_nb, group_by=group_by),
pd.DataFrame([
[1.0, 1.0],
[3.0, 2.0],
[3.0, np.nan],
[3.0, 2.0],
[1.0, 1.0]
], index=df.index, columns=['g1', 'g2'])
)
def test_flatten_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='C'),
pd.DataFrame([
[1.0, 1.0],
[np.nan, np.nan],
[2.0, 2.0],
[4.0, np.nan],
[3.0, np.nan],
[3.0, np.nan],
[4.0, 2.0],
[2.0, np.nan],
[np.nan, 1.0],
[1.0, np.nan]
], index=np.repeat(df.index, 2), columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='F'),
pd.DataFrame([
[1.0, 1.0],
[2.0, 2.0],
[3.0, np.nan],
[4.0, 2.0],
[np.nan, 1.0],
[np.nan, np.nan],
[4.0, np.nan],
[3.0, np.nan],
[2.0, np.nan],
[1.0, np.nan]
], index=np.tile(df.index, 2), columns=['g1', 'g2'])
)
@pytest.mark.parametrize(
"test_name,test_func,test_func_nb",
[
('min', lambda x, **kwargs: x.min(**kwargs), nb.nanmin_nb),
('max', lambda x, **kwargs: x.max(**kwargs), nb.nanmax_nb),
('mean', lambda x, **kwargs: x.mean(**kwargs), nb.nanmean_nb),
('median', lambda x, **kwargs: x.median(**kwargs), nb.nanmedian_nb),
('std', lambda x, **kwargs: x.std(**kwargs, ddof=0), nb.nanstd_nb),
('count', lambda x, **kwargs: x.count(**kwargs), nb.nancnt_nb),
('sum', lambda x, **kwargs: x.sum(**kwargs), nb.nansum_nb)
],
)
def test_funcs(self, test_name, test_func, test_func_nb):
# numeric
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack()),
test_func(df['c'])
], index=['g1', 'g2']).rename(test_name)
)
np.testing.assert_array_equal(test_func(df).values, test_func_nb(df.values))
pd.testing.assert_series_equal(
test_func(df.vbt, wrap_kwargs=dict(time_units=True)),
test_func(df).rename(test_name) * day_dt
)
# boolean
bool_ts = df == df
assert test_func(bool_ts['a'].vbt) == test_func(bool_ts['a'])
pd.testing.assert_series_equal(
test_func(bool_ts.vbt),
test_func(bool_ts).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(bool_ts.vbt, wrap_kwargs=dict(time_units=True)),
test_func(bool_ts).rename(test_name) * day_dt
)
@pytest.mark.parametrize(
"test_name,test_func",
[
('idxmin', lambda x, **kwargs: x.idxmin(**kwargs)),
('idxmax', lambda x, **kwargs: x.idxmax(**kwargs))
],
)
def test_arg_funcs(self, test_name, test_func):
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack())[0],
test_func(df['c'])
], index=['g1', 'g2'], dtype='datetime64[ns]').rename(test_name)
)
def test_describe(self):
pd.testing.assert_series_equal(
df['a'].vbt.describe(),
df['a'].describe()
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=None),
df.describe(percentiles=None)
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=[]),
df.describe(percentiles=[])
)
test_against = df.describe(percentiles=np.arange(0, 1, 0.1))
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1)),
test_against
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1), group_by=group_by),
pd.DataFrame({
'g1': df[['a', 'b']].stack().describe(percentiles=np.arange(0, 1, 0.1)).values,
'g2': df['c'].describe(percentiles=np.arange(0, 1, 0.1)).values
}, index=test_against.index)
)
def test_drawdown(self):
pd.testing.assert_series_equal(
df['a'].vbt.drawdown(),
df['a'] / df['a'].expanding().max() - 1
)
pd.testing.assert_frame_equal(
df.vbt.drawdown(),
df / df.expanding().max() - 1
)
def test_drawdowns(self):
assert type(df['a'].vbt.drawdowns) is vbt.Drawdowns
assert df['a'].vbt.drawdowns.wrapper.freq == df['a'].vbt.wrapper.freq
assert df['a'].vbt.drawdowns.wrapper.ndim == df['a'].ndim
assert df.vbt.drawdowns.wrapper.ndim == df.ndim
def test_to_mapped_array(self):
np.testing.assert_array_equal(
df.vbt.to_mapped_array().values,
np.array([1., 2., 3., 4., 4., 3., 2., 1., 1., 2., 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().col_arr,
np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().idx_arr,
np.array([0, 1, 2, 3, 1, 2, 3, 4, 0, 1, 3, 4])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).values,
np.array([1., 2., 3., 4., np.nan, np.nan, 4., 3., 2., 1., 1., 2., np.nan, 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).col_arr,
np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).idx_arr,
np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
)
def test_zscore(self):
pd.testing.assert_series_equal(
df['a'].vbt.zscore(),
(df['a'] - df['a'].mean()) / df['a'].std(ddof=0)
)
pd.testing.assert_frame_equal(
df.vbt.zscore(),
(df - df.mean()) / df.std(ddof=0)
)
def test_split(self):
splitter = TimeSeriesSplit(n_splits=2)
(train_df, train_indexes), (test_df, test_indexes) = df['a'].vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[np.nan, 4.0]
]),
index= | pd.RangeIndex(start=0, stop=4, step=1) | pandas.RangeIndex |
import numpy as np
import pandas as pd
from datetime import datetime
import random as rd
from pandas import DataFrame
from math import sqrt
from scipy.stats import norm
from functools import wraps
class create_data():
'''create data
e.g.
s = pd.to_datetime('01-01-2019')
create_data('S', date = [s, datetime.now()]).gendateseries()
create_data('S', date = datetime.now(), direction='Backwards').create_brownian_motion()
pandas freq options:
Alias, Description
B, business day frequency
C, custom business day frequency
D, calendar day frequency
W, weekly frequency
M, month end frequency
SM, semi-month end frequency (15th and end of month)
BM, business month end frequency
CBM, custom business month end frequency
MS, month start frequency
SMS, semi-month start frequency (1st and 15th)
BMS, business month start frequency
CBMS, custom business month start frequency
Q, quarter end frequency
BQ, business quarter end frequency
QS, quarter start frequency
BQS, business quarter start frequency
A, Y, year end frequency
BA, BY, business year end frequency
AS, YS, year start frequency
BAS, BYS, business year start frequency
BH, business hour frequency
H, hourly frequency
T, min, minutely frequency
S, secondly frequency
L, ms, milliseconds
U, us, microseconds
N, nanoseconds
'''
def __init__(self, frequency, date=datetime.now(), num_periods: 'int' = 100, direction=None):
self.freq = frequency
self.date = date
self.num_periods = num_periods
self.direction = direction
if isinstance(date, list):
self.start = date[0]
self.end = date[1]
if self.end < self.start:
self.end = date[0]
self.start = date[1]
self.dates = self.gendateseries()
def gendateseries(self):
try:
dates = pd.date_range(
start=self.start, end=self.end, freq=self.freq)
except:
if self.direction == 'Backwards' or self.direction == 1:
dates = pd.date_range(
end=self.date, periods=self.num_periods, freq=self.freq)
elif self.direction == 'Forwards' or self.direction == -1:
dates = pd.date_range(
start=self.date, periods=self.num_periods, freq=self.freq)
# else:
# raise AttributeError
return | DataFrame(dates, columns=['Date']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Main module, contains the base object that host all the different analysis
Authors: B.G. 18/11/2018
"""
# This module manages raster I/O operations, based on rasterio (which itself depends on GDAL)
from lsdtopytools import raster_loader as rl
from lsdtopytools import lsdtopytools_utilities as ut
from lsdtopytools import geoconvtools as gcv
# This module is the low-level interface with the c++ code: it controls the c++ LSDDEM_xtensor object
from lsdtt_xtensor_python import LSDDEM_cpp
# Numpy provides efficient numerical arrays and shitload of useful operations with it
import numpy as np
# Pandas manages dataset as tables and provides quite a lot of fast operations
import pandas as pd
# Deals with OS operations (check folders, path and stuff aye)
import os
import time as clock
class LSDDEM(object):
"""
LSDEM is the main object controlling the module. It relies on loading a georeferenced encironmont from a raster (or eventually a csv file) and manage everything you need out of that.
This object follows a strategy of accesser/setter with the property argument from python. The advantage of such a method is to make easy it's use for End user.
For example, one might want to totally control all the algorithms parameter himself and extract all the metrics step by step but others might only want a quick overview of their field.
With this approach we can make sure to conditionalize the access to each component. It will also help memory management by dropping to the disk data and only load it when required.
"""
def __init__(self, path = "./", file_name = "test.tif", already_preprocessed = False, verbose = True, prefix_supp = "", remove_seas = False, sea_level = 0):
"""
Description:
Constructor for the DEM object: initiate the program and the talk with LSDTopoTools.
Arguments:
path (str): Path to the file. Example on Windows: "C:/Users/s1675537/Desktop/"; on linux: "/home/Boris/Desktop/"; on Mac: No idea.
file_name (str): The name of the file with the extension, for example "cairgons.tif"
already_preprocessed (bool): If your Raster already is ready for flow routines (ie does not need any filling, carving or filtering), turn it to True.
Return:
A LSDDEM object
Authors:
<NAME>
Date:
14/12/2018
"""
# Saving the path and file name
timer = clock.time()
self.path = path
self.file_name = file_name
# Getting the prefix for saving all your files:
self.prefix = str("".join(self.file_name.split(".")[:-1])) # -> prefix = base name without extention
self.prefix += prefix_supp
# Do you want me to talk? TODO: find a solution to catch and maybe stop the cout statements
self.verbose = verbose
#Alright getting the dem:
print("Loading the raster from file: %s%s"%(self.path,self.file_name) ) if(self.verbose) else 0
temp_loaded = rl.load_raster(path+file_name)
## DEM extents and dimensions
self.extent = temp_loaded["extent"]
self.x_min = temp_loaded["x_min"]
self.x_max = temp_loaded["x_max"]
self.y_min = temp_loaded["y_min"]
self.y_max = temp_loaded["y_max"]
self.resolution = temp_loaded["res"]
self.crs = temp_loaded["crs"]
## Redirecting no data values to a single one
ndt_ls_temp = temp_loaded["nodata"]
print("LOADING TOOK", clock.time() - timer)
timer = clock.time()
# Recasting No Data to -9999. LSDTopoTools cannot deal with multiple no data. Although it would be simple, it would add useless complexity. We are not software developers.
print("I am recasting your nodata values to -9999 (standard LSDTT)") if(self.verbose) else 0
temp_loaded["array"][np.isin(temp_loaded["array"],ndt_ls_temp)] = -9999 # Recasting
self.nodata = -9999 # Saving
# nrows and ncols are the raster indexes dimensions
self.nrows = temp_loaded["nrows"]
self.ncols = temp_loaded["ncols"]
# Removing the seas
if(remove_seas):
temp_loaded["array"][temp_loaded["array"]<sea_level] = -9999
# Other check
self.check_if_preprocessed = False
self.ready_for_flow_routines = False
self.check_catchment_defined = False
self.check_river_readiness = False
self.check_flow_routines = False
self.ksn_extracted = False
self.check_chi_gen = False
self.knickpoint_extracted = False
self.has_been_carved_by_me = False
self.has_been_filled_by_me = False
# Initialaising empty DF
self.df_ksn = None
self.df_knickpoint = None
print("PREPROC TOOK", clock.time() - timer)
timer = clock.time()
print("Alright, let me summon control upon the c++ code ...") if(self.verbose) else 0
self.cppdem = LSDDEM_cpp(self.nrows, self.ncols, self.x_min, self.y_min, self.resolution, self.nodata, temp_loaded["array"])
del temp_loaded # releasing memory (I hope at least ahah)
print("Got it.") if(self.verbose) else 0
print("INGESTINGINTO CPP TOOK", clock.time() - timer)
timer = clock.time()
if(already_preprocessed):
print("WARNING: you are telling me that the raster is already preprocessed. You mean either you don't need flow routine (e.g., slope, curvature, roughness,... calculations) or you already made sure you got rid of your depressions.") if(self.verbose) else 0
self.cppdem.is_already_preprocessed()
self.ready_for_flow_routines = True
self.check_if_preprocessed = True
print("TELLINGCPP IT IS PP TOOK", clock.time() - timer)
timer = clock.time()
if not os.path.exists(path+self.prefix+"/"):
os.makedirs(path+self.prefix+"/")
self.save_dir = path+self.prefix+"/"
self.hdf_name = self.save_dir+self.prefix+"_dataset.hdf"
exists = os.path.isfile(self.save_dir+self.prefix+"_dataset.hdf")
if not exists:
df = pd.DataFrame({"created": [True]})
ut.save_to_database(self.hdf_name,"Init", df)
print("FINALISATION TOOK", clock.time() - timer)
timer = clock.time()
print("lsdtopytools is now ready to roll!") if(self.verbose) else 0
def PreProcessing(self, filling = True, carving = True, minimum_slope_for_filling = 0.0001):
"""
Description:
Any DEM is noisy to a certain extent. Before running flow routines, this function offers algorithms to preprocess DEM cells and make sure no spurious pit blocks the flow path.
Filling currently uses Wang et al., 2006 and carving uses Lindsay et al., 2016.
Filling makes sure that a minimum slope is imposed on each cell; carving breaches the pits to let the flow go through.
Arguments:
filling (bool): do you want to fill?
carving (bool): Wanna carve mate?
minimum_slope_for_filling (float): Minimum gradient to impose between cells when filling your DEM.
Return:
Nothing; calculates the preprocessed raster (PPRaster) in the cpp object.
Authors:
<NAME>
Date:
14/12/2018
"""
# Calling the cpp interface:
print("Carving: implementation of Lindsay (2016) DOI: 10.1002/hyp.10648") if(self.verbose and carving) else 0
print("Filling: implementation of Wang and Liu (2006): https://doi.org/10.1080/13658810500433453") if(self.verbose and filling) else 0
print("Processing...") if(self.verbose) else 0
self.cppdem.PreProcessing(carving, filling, minimum_slope_for_filling)
print("DEM ready for flow routines!") if(self.verbose) else 0
self.check_if_preprocessed = True
self.ready_for_flow_routines = True
self.has_been_filled_by_me = True if(filling) else False
self.has_been_carved_by_me = True if(carving) else False
def CommonFlowRoutines(self, discharge = False, ingest_precipitation_raster = None, precipitation_raster_multiplier = 1):
"""
Description:
Most of the algorithms need a common base of flow routines to be preprocessed: flow accumulation, drainage area, ... as well as
hidden LSDTT structures required to run the analyses. This takes care of that for you.
Arguments:
ingest_precipitation_raster (str): path + full name of the precipitation raster to ingest. It HAS to be in the same coordinate system as the main raster.
precipitation_raster_multiplier (float): multiplier, useful to change units; for example, if your raster is in mm, multiplying by 0.001 would put it in metres.
Returns:
Nothing but initiate a bunch of attributes in the c++ object
Authors:
<NAME>
Date:
14/12/2018
"""
if(self.check_if_preprocessed != True):
print("WARNING!! You did not preprocessed the dem, I am defaulting it. Read the doc about preprocessing for more information.")
self.PreProcessing()
self.check_flow_routines = True
print("Processing common flow routines...") if(self.verbose) else 0
self.cppdem.calculate_FlowInfo()
if(discharge):
print("DEBUGDEBUGDEBUGDEBUG")
print("EXPERIMENTAL WARNING: you are ingesting a precipitation raster, I will recalculate the drainage area into discharge. This will affect all the routines using DA obviously") if(self.verbose) else 0
temp_loaded = rl.load_raster(ingest_precipitation_raster)
temp_loaded["array"] = temp_loaded["array"] * precipitation_raster_multiplier
if(temp_loaded["nodata"][0] is None):
temp_loaded["nodata"] = list(temp_loaded["nodata"])
temp_loaded["nodata"][0] = -9999
self.cppdem.calculate_discharge_from_precipitation(temp_loaded["nrows"], temp_loaded["ncols"],temp_loaded["x_min"], temp_loaded["y_min"], temp_loaded["res"], temp_loaded["nodata"][0],temp_loaded["array"], True)
print("Done!") if(self.verbose) else 0
def ExtractRiverNetwork(self, method = "area_threshold", area_threshold_min = 1000, source_nodes = None):
"""
Description:
Extract river network from sources. Several methods are available to detect source locations that are then used to create the rivers.
Available algorithms:
minimum area threshold: If the exact location of the channel heads is not important, or if you are working with a relatively low-precision raster (e.g. SRTM), this method very quickly initiates channels wherever a minimum flow accumulation is reached.
TODO: wiener and DREICH
Arguments:
method (str): Name of the method to use. Currently "area_threshold" or "source_nodes" (wiener and DREICH are still TODO).
area_threshold_min (int): in case you chose the area_threshold method, it determines the minimum number of contributing pixels for initiating a channel.
Returns:
Nothing, generates the channel network
Authors:
<NAME>
Date:
14/12/2018
"""
if(self.check_if_preprocessed != True):
print("WARNING!! You did not preprocessed the dem, I am defaulting it. Read the doc about preprocessing for more information.")
self.PreProcessing()
if(self.check_flow_routines != True):
print("WARNING!! You did not calculate the flow routines, let me do it first as this is required for extracting the river network.")
self.CommonFlowRoutines()
self.check_river_readiness = True
if(method=="area_threshold"):
self.cppdem.calculate_channel_heads("min_contributing_pixels", int(area_threshold_min))
elif(method == "source_nodes"):
# Ingest source nodes calculated from another method, e.g. using lsdtt
self.cppdem.ingest_channel_head(source_nodes)
else:
print("Not Implemented yet! sns.")
self.cppdem.calculate_juctionnetwork()
def DefineCatchment(self, method="min_area", test_edges = False, min_area = 1e6,max_area = 1e9, X_coords = [], Y_coords = [],
coord_search_radius_nodes = 30, coord_threshold_stream_order = 3,
coordinates_system = "UTM", UTM_zone = -1):
"""
Description:
Define the catchments of interest from conditions. DISCLAIMER: work in progress, can be buggy.
Safest option (if possible) is a list of lat-lon or XY.
Arguments:
method(str): name of the method to use. Can be:
"min_area_pixels": keep all watersheds made of min_area_pixels or more.
"from_XY": snap location to XY coordinates
TODO: the rest of the methods
test_edges (bool): Ignore uncomplete catchment
min_area (float): minimum basin size threshold used by the "min_area" and "from_range" methods.
X_coords (list or numpy array): X coordinates for method "from_XY"
Y_coords (list or numpy array): Y coordinates for method "from_XY"
coord_search_radius_nodes (int): radius of search around the given coordinate;
coord_threshold_stream_order (int): minimum or maximum stream order (e.g., do you want to snap to large or small river. or even moderate, let's go crazy.)
Returns:
Nothing, but defines a set of catchments required for some analyses (e.g., movern, knickpoints or ksn)
Authors:
<NAME>
Date:
14/12/2018
"""
# I am first proceeding to routines check: Have you processed everything required?
output = {}
if(self.check_if_preprocessed != True):
print("WARNING!! You did not preprocessed the dem, I am defaulting it. Read the doc about preprocessing for more information.")
self.PreProcessing()
if(self.check_flow_routines != True):
print("WARNING!! You did not calculate the flow routines, let me do it first as this is required for extracting the river network.")
self.CommonFlowRoutines()
if(self.check_river_readiness != True):
print("WARNING!! You did not processed the river network, let me do it first as this is required for defining the catchment.")
self.ExtractRiverNetwork()
self.check_catchment_defined = True
if(method == "min_area"):
output = self.cppdem.calculate_outlets_locations_from_minimum_size(float(min_area), test_edges, False)
elif(method == "from_XY_old"):
X_coords = np.array(X_coords)
Y_coords = np.array(Y_coords)
if(coordinates_system == "WGS84"):
# Then convert
X_coords, Y_coords, zone_number = gcv.from_latlon(X_coords, Y_coords, force_zone_number= UTM_zone)
self.cppdem.calculate_outlets_locations_from_xy(np.array(X_coords), np.array(Y_coords), coord_search_radius_nodes, coord_threshold_stream_order, test_edges, False)
output["X"] = X_coords;
output["Y"] = Y_coords;
elif(method == "from_XY"):
X_coords = np.array(X_coords)
Y_coords = np.array(Y_coords)
if(coordinates_system == "WGS84"):
# Then convert
X_coords, Y_coords, zone_number = gcv.from_latlon(X_coords, Y_coords, force_zone_number= UTM_zone)
self.cppdem.calculate_outlets_locations_from_xy_v2(np.array(X_coords), np.array(Y_coords), coord_search_radius_nodes,test_edges)
output["X"] = X_coords;
output["Y"] = Y_coords;
elif(method == "force_all"):
self.cppdem.force_all_outlets(test_edges)
elif(method == "main_basin"):
output = self.cppdem.calculate_outlet_location_of_main_basin(test_edges)
elif(method == "from_range"):
output = self.cppdem.calculate_outlets_locations_from_range_of_DA(min_area,max_area,test_edges);
else:
print("Not done yet, work in progress on that")
quit()
return self.cppdem.get_baselevels_XY();
def GenerateChi(self, theta=0.45,A_0 = 1):
"""
Generates chi coordinates. This is needed for quite a lot of routines, and effectively also calculates quite a lot of elements in the river network (node ordering and other similar things).
The chi coordinate is detailed in Perron and Royden (2013) -> DOI: 10.1002/esp.3302
It needs preprocessing, flow routines, Extraction of river network and the definition of catchment of interest.
Arguments:
theta(float): concavity of profiles (m/n in Stream power like laws, theta in Flint's law). This is a really important parameter to constrain!! -> Mudd et al., 2018
A_0 (float): Reference drainage area. It is important to set it to 1 if you want to get ksn from the chi coordinate (see Mudd et al., 2014 for details) -> 10.1002/2013JF002981
returns:
Nothing, but calculates the chi coordinate.
Authors:
B.G
Date:
12/2018
"""
# pre-checkings
if(self.check_if_preprocessed != True):
print("WARNING!! You did not preprocessed the dem, I am defaulting it. Read the doc about preprocessing for more information.")
self.PreProcessing()
if(self.check_flow_routines != True):
print("WARNING!! You did not calculate the flow routines, let me do it first as this is required generating chi.")
self.CommonFlowRoutines()
if(self.check_river_readiness != True):
print("WARNING!! You did not processed the river network, let me do it first as this is required for generating chi.")
self.ExtractRiverNetwork()
if(self.check_catchment_defined != True):
print("WARNING!! You did not defined any catchment, let me do it first as this is required for generating chi.")
self.DefineCatchment()
self.check_chi_gen = True
self.m_over_n = theta # Idk but we might need that at some point eventually
self.cppdem.generate_chi(theta, A_0)
# Trying to get the chi coordinate out of that
D1 = self.cppdem.get_int_ksn_data()
D2 = self.cppdem.get_float_ksn_data()
Dict_of_ksn = {}; Dict_of_ksn.update(D1); Dict_of_ksn.update(D2); del D1; del D2;
self.df_base_river = pd.DataFrame( { "basin_key": Dict_of_ksn["basin_key"], "col": Dict_of_ksn["col"], "nodeID": Dict_of_ksn["nodeID"],
"row": Dict_of_ksn["row"], "source_key": Dict_of_ksn["source_key"], "chi": Dict_of_ksn["chi"], "drainage_area": Dict_of_ksn["drainage_area"],
"elevation": Dict_of_ksn["elevation"], "flow_distance": Dict_of_ksn["flow_distance"], "x": Dict_of_ksn["x"], "y": Dict_of_ksn["y"] } )
def ksn_MuddEtAl2014(self, target_nodes=70, n_iterations=60, skip=1, minimum_segment_length=10, sigma=2, nthreads = 1, reload_if_same = False):
"""
Calculates ksn with the Mudd et al., 2014 algorithm, designed to extract ksn with a robust statistical method. More robust than SA plots or a basic chi-z linear regression.
Arguments:
target_nodes(int): Full details in paper; basically, low values create shorter segments (fine-grained calculation) and higher values larger segments (large-scale trends). Values higher than 100 will extremely slow down the algorithm.
n_iterations(int): Full details in paper, the algorithm sample random node combinations and this parameters controls the number of iterations to get that. More iterations = less noise sensitive.
skip(int): Full details in paper, recommended between 1-4. Higher values sample less adjacent nodes.
minimum_segment_length(int): Do not change (debugging purposes, I shall remove that actually).
sigma(int): Full details in paper, recommended not to change.
nthreads (int): Experimental, leave to 1. Not ready for multithreading, will segfault.
reload_if_same (bool): Experimental, the logic will probably change, but it uses pandas and pytables abilities to very efficiently save and reload what has already been calculated. It checks whether it is safe to reload or not.
Returns:
Returns a string stating "generated" if recalculated or "reloaded" if data has simply been reloaded
"""
if(self.check_if_preprocessed != True):
print("WARNING!! You did not preprocessed the dem, I am defaulting it. Read the doc about preprocessing for more information.")
self.PreProcessing()
if(self.check_flow_routines != True):
print("WARNING!! You did not calculate the flow routines, let me do it first as this is required generating chi.")
self.CommonFlowRoutines()
if(self.check_river_readiness != True):
print("WARNING!! You did not processed the river network, let me do it first as this is required for generating chi.")
self.ExtractRiverNetwork()
if(self.check_catchment_defined != True):
print("WARNING!! You did not defined any catchment, let me do it first as this is required for generating chi.")
self.DefineCatchment()
if(self.check_chi_gen != True):
print("WARNING!! You did not generated chi, I am defaulting it AND THAT'S BAD! I AM ASSUMING A DEFAUT CONCAVITY TO 0.45 WHICH IS NOT NECESSARILY THE CASE!!!.")
self.GenerateChi()
# First checking if you just want to reload
if(reload_if_same):
print("I am just checking if I can reload data safely")
test_dict = {"target_nodes":target_nodes, "n_iterations":n_iterations, "skip":skip, "minimum_segment_length":minimum_segment_length, "sigma":sigma}
tdf, met = ut.load_from_database(self.hdf_name,"ksn_MuddEtAl2014")
is_same = True
for key,val in test_dict.items():
if(val != met[key]):
is_same = False
if(is_same):
self.df_ksn = tdf
print("Successfuly reloaded the file")
self.ksn_extracted = True
## This return statment end the function and will not recalculate ksn
return "reloaded"
else:
print("Failed to safely reload, one of your paramter has changed from the calculation.")
self.ksn_extracted = True
if(nthreads>1):
print("WARNING: Experimental multithreading on ksn extraction. Can (i) crash (works 99/100 of the time on Linux and 50/100 of the time on windows, because windows hates developers, seriously) or (ii) produce weird source/basin numbering (e.g. not always constant).")
self.cppdem.generate_ksn(target_nodes, n_iterations, skip, minimum_segment_length, sigma, nthreads)
D1 = self.cppdem.get_int_ksn_data()
D2 = self.cppdem.get_float_ksn_data()
Dict_of_ksn = {}; Dict_of_ksn.update(D1); Dict_of_ksn.update(D2); del D1; del D2;
		self.df_ksn = pd.DataFrame(Dict_of_ksn)
# coding=utf-8
# Author: <NAME>
# Date: Sept 11, 2019
#
# Description: Indexes meta-genes to select core meiotic genes.
# Pipeline: Only mammal (HS & MM) conserved genes that Up/Down Regulated.
#
#
import math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import pandas as pd
from adjustText import adjust_text
from pylab import cm
from matplotlib import colors
def PCA_var_explained_plots(adata):
n_rows = 1
n_cols = 2
fig = plt.figure(figsize=(n_cols*4.5, n_rows*3))
# variance explained
ax1 = fig.add_subplot(n_rows, n_cols, 1)
x1 = range(len(adata.uns['pca']['variance_ratio']))
y1 = adata.uns['pca']['variance_ratio']
ax1.scatter(x1, y1, s=3)
ax1.set_xlabel('PC'); ax1.set_ylabel('Fraction of variance explained')
ax1.set_title('Fraction of variance explained per PC')
# cum variance explainend
ax2 = fig.add_subplot(n_rows, n_cols, 2)
cml_var_explained = np.cumsum(adata.uns['pca']['variance_ratio'])
x2 = range(len(adata.uns['pca']['variance_ratio']))
y2 = cml_var_explained
ax2.scatter(x2, y2, s=4)
ax2.set_xlabel('PC')
ax2.set_ylabel('Cumulative fraction of variance explained')
ax2.set_title('Cumulative fraction of variance explained by PCs')
plt.tight_layout()
plt.show()
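# Minimal usage sketch (assumes "adata" is an AnnData object on which PCA has already been run,
# e.g. with scanpy, so that adata.uns['pca']['variance_ratio'] is populated):
#   PCA_var_explained_plots(adata)  # left: variance ratio per PC, right: cumulative variance ratio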
def assign_to_red_or_black_group(x, y, x_cutoff, y_cutoff):
"""xcoord is coefficient (MAST already took log2). ycoord is -log10(pval). label is gene name."""
if abs(x) > x_cutoff and y > y_cutoff:
color = "red"
# x coordinate (coef) is set to 0 if one of the two groups has zero counts (in that case,
# a fold change cannot be calculated). We'll color these points with 'salmon' (similar to red)
elif abs(x) == 0 and y > y_cutoff:
color = "salmon"
else:
color = "black"
return color
def plot_volcano_plot(
dea_results,
x_cutoff,
y_cutoff,
title,
use_zscores=False,
plot_labels=True,
min_red_dots=None,
figsize=(15, 7.5),
show_plot=False,
):
    """makes volcano plot. title is the title of the plot. dea_results is a dataframe of MAST output
    (indexed by gene name, with 'coef' and 'pval_adj' columns). The cutoffs determine which dots will
    be colored red. plot_labels can be set to False if no labels are wanted, otherwise all
    red dots will be labeled with their gene name. If min_red_dots is set to a number, the x_cutoff will be
    decreased (by a factor of 0.9 each time) until at least min_red_dots are red. figsize is a tuple of size 2,
    and determines the size of the figure. Returns the figure."""
coefs = dea_results.loc[:, "coef"].copy()
xcoords = coefs.fillna(0)
if use_zscores:
pvals = dea_results.loc[:, "coef_Z"]
ycoords = pvals
else:
pvals = dea_results.loc[:, "pval_adj"].copy()
        # NOTE: SETTING PVALS THAT ARE 0 (DUE TO ROUNDING) TO THE MINIMUM NON-ZERO VALUE HERE
pvals[pvals == 0] = np.min(pvals[pvals != 0]) # np.nextafter(0, 1)
ycoords = -np.log10(pvals)
gene_names = dea_results.index.tolist()
colors = [
assign_to_red_or_black_group(x, y, x_cutoff, y_cutoff)
for x, y in zip(xcoords, ycoords)
]
# if min_red_dots is set (i.e. not None), check if enough points are labeled red. If not, adjust x cutoff:
    if min_red_dots is not None:
n_red_points = sum([x == "red" for x in colors])
while n_red_points < min_red_dots:
x_cutoff = 0.9 * x_cutoff # make x cutoff less stringent
# reevaluate color of points using new cutoff:
colors = [
assign_to_red_or_black_group(x, y, x_cutoff, y_cutoff)
for x, y in zip(xcoords, ycoords)
]
n_red_points = sum([x == "red" for x in colors])
# extract coordinates separately for red and black
black_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "black"
]
red_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "red"
]
salmon_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "salmon"
]
fig, ax = plt.subplots(figsize=figsize)
plt.plot(
[x for x, y in black_coords],
[y for x, y in black_coords],
marker=".",
linestyle="",
color="royalblue",
)
plt.plot(
[x for x, y in salmon_coords],
[y for x, y in salmon_coords],
marker=".",
linestyle="",
color="salmon",
)
plt.plot(
[x for x, y in red_coords],
[y for x, y in red_coords],
marker=".",
linestyle="",
color="red",
)
    if plot_labels:
ten_lowest_salmon_pvals_gene_names = [
gene_name
for _, gene_name, color in sorted(zip(pvals, gene_names, colors))
if color == "salmon"
][:10]
        # label if color is set to red, or if color is set to salmon and the gene is one of the ten salmon genes with lowest pval
labels = [
plt.text(x, y, label, ha="center", va="center")
for x, y, color, label in zip(xcoords, ycoords, colors, gene_names)
if (
color in ["red"]
or (color == "salmon" and label in ten_lowest_salmon_pvals_gene_names)
)
]
adjust_text(labels)
    plt.xlabel(
        "coef (=log(fold change))",
fontsize=13,
)
if use_zscores:
plt.ylabel("Z-score based on stdev")
else:
plt.ylabel("-log10 adjusted p-value", fontsize=14)
plt.title(
title
+ " (n genes: "
+ str(len(gene_names))
+ ") \n x-cutoff="
+ str(round(x_cutoff, 2))
+ ", y-cutoff="
+ str(round(y_cutoff, 2)),
fontsize=16,
)
    if not show_plot:
plt.close()
return fig
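# Usage sketch (illustrative only; "dea_results" is a hypothetical dataframe indexed by gene name
# with the "coef" and "pval_adj" columns this function expects):
#   fig = plot_volcano_plot(dea_results, x_cutoff=0.5, y_cutoff=-np.log10(0.05),
#                           title="cluster 3 vs rest", min_red_dots=20, show_plot=True)
#   fig.savefig("volcano_cluster3.png", dpi=300, bbox_inches="tight")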
def plot_bar_chart(
adata,
x_var,
y_var,
x_names=None,
y_names=None,
y_min=0,
return_fig=False,
cmap="tab20",
):
"""plots stacked bar chart.
Arguments
adata - anndata object
x_var - name of obs variable to use for x-axis
y_var - name of obs variable to use for y-axis
x_names - names of x groups to include, exclude all other groups
y_names - names of y groups to include, exclude all other groups
y_min - minimum percentage of group to be labeled in plots. If
percentage of a y_group is lower than this minimum in all
x_groups, then the y_group will be pooled under "other".
return_fig - (Boolean) whether to return matplotlib figure
cmap - name of matplotlib colormap
Returns:
matplotlib figure of barchart if return_fig is True. Otherwise nothing.
"""
bar_chart_df_abs = adata.obs.groupby([x_var, y_var]).agg(
{x_var: "count"}
) # calculate count of each y_var for each x_var
bar_chart_df = (
bar_chart_df_abs.groupby(level=0)
.apply(lambda x: x / float(x.sum()) * 100)
.unstack()
) # convert to percentages
# clean up columns/index
bar_chart_df.columns = bar_chart_df.columns.droplevel(0)
bar_chart_df.index.name = None
bar_chart_df.columns.name = None
# if y_min > 0, re-map y categories:
if y_min > 0:
# check which y variables never have a fraction above y_min
y_var_to_remove = (bar_chart_df >= y_min).sum(axis=0) == 0
y_var_remapping = dict()
for y_name, to_remove in zip(y_var_to_remove.index, y_var_to_remove.values):
if to_remove:
y_var_remapping[y_name] = "other"
else:
y_var_remapping[y_name] = y_name
adata.obs["y_temp"] = adata.obs[y_var].map(y_var_remapping)
# recalculate bar_chart_df, now using re-mapped y_var
bar_chart_df_abs = adata.obs.groupby([x_var, "y_temp"]).agg(
{x_var: "count"}
) # calculate count of each y_var for each x_var
bar_chart_df = (
bar_chart_df_abs.groupby(level=0)
.apply(lambda x: x / float(x.sum()) * 100)
.unstack()
) # convert to percentages
# clean up columns/index
bar_chart_df.columns = bar_chart_df.columns.droplevel(0)
bar_chart_df.index.name = None
bar_chart_df.columns.name = None
# prepare x and y variables for bar chart:
if x_names is None:
x_names = bar_chart_df.index
else:
if not set(x_names).issubset(adata.obs[x_var]):
raise ValueError("x_names should be a subset of adata.obs[x_var]!")
if y_names is None:
y_names = bar_chart_df.columns
else:
if not set(y_names).issubset(adata.obs[y_var]):
raise ValueError(
"y_names should be a subset of adata.obs[y_var]! (Note that this can be affected by your y_min setting.)"
)
# subset bar_chart_df based on x and y names:
bar_chart_df = bar_chart_df.loc[x_names, y_names]
x_len = len(x_names)
y_names = bar_chart_df.columns
y_len = len(y_names)
# setup colors
colormap = cm.get_cmap(cmap)
cols = [colors.rgb2hex(colormap(i)) for i in range(colormap.N)]
# set bar width
barWidth = 0.85
# plot figure
fig = plt.figure(figsize=(12, 3))
axs = []
# plot the bottom bars of the stacked bar chart
axs.append(
plt.bar(
range(len(x_names)),
bar_chart_df.loc[:, y_names[0]],
color=cols[0],
# edgecolor="white",
width=barWidth,
label=y_names[0],
)
)
# store the bars as bars_added, to know where next stack of bars should start
# in y-axis
bars_added = [bar_chart_df.loc[:, y_names[0]]]
# now loop through the remainder of the y categories and plot
for i, y in enumerate(y_names[1:]):
axs.append(
plt.bar(
x=range(len(x_names)), # numbers of bars [1, ..., n_bars]
height=bar_chart_df.loc[:, y], # height of current stack
bottom=[
sum(idx_list) for idx_list in zip(*bars_added)
], # where to start current stack
color=cols[i + 1],
# edgecolor="white",
width=barWidth,
label=y,
)
)
        # append plotted bars to the bars_added variable
bars_added.append(bar_chart_df.loc[:, y])
# Custom x axis
plt.xticks(range(len(x_names)), x_names, rotation=90)
plt.xlabel(x_var)
# Add a legend
plt.legend(
axs[::-1],
[ax.get_label() for ax in axs][::-1],
loc="upper left",
bbox_to_anchor=(1, 1),
ncol=1,
)
# add y label:
plt.ylabel("percentage of cells")
# add title:
plt.title(f"{y_var} fractions per {x_var} group")
# Show graphic:
plt.show()
# return figure:
if return_fig:
return fig
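# Usage sketch (assumes "adata" has categorical obs columns with these names; the column names
# are placeholders and may differ per dataset):
#   fig = plot_bar_chart(adata, x_var="sample", y_var="cell_type", y_min=1, return_fig=True)
#   # y groups that never reach 1% in any sample are pooled into an "other" bar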
def plot_dataset_statistics(
adata, return_fig=False, show=True, fontsize=10, figwidthscale=3, figheightscale=4
):
data_by_subject = adata.obs.groupby("subject_ID").agg(
{
"study": "first",
}
)
data_by_sample = adata.obs.groupby("sample").agg({"study": "first"})
n_figures = 3
n_cols = 3
n_rows = int(np.ceil(n_figures / n_cols))
fig = plt.figure(figsize=(figwidthscale * n_cols, figheightscale * n_rows))
fig_count = 0
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_subj_freqs = data_by_subject.study.value_counts()
datasets_ordered = dataset_subj_freqs.index
ax.bar(dataset_subj_freqs.index, dataset_subj_freqs.values)
ax.set_title("subjects per study", fontsize=fontsize)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_sample_freqs = data_by_sample.study.value_counts()
ax.bar(datasets_ordered, dataset_sample_freqs[datasets_ordered].values)
ax.set_title("samples per study", fontsize=fontsize)
ax.set_ylabel("n samples", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_cell_freqs = adata.obs.study.value_counts()
ax.bar(datasets_ordered, dataset_cell_freqs[datasets_ordered].values)
ax.set_title("cells per study", fontsize=fontsize)
ax.set_ylabel("n cells", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
plt.tight_layout()
plt.grid(False)
if show:
plt.show()
plt.close()
if return_fig:
return fig
def plot_subject_statistics(
adata,
return_fig=False,
show=True,
fontsize=12,
figheight=5,
figwidth=5,
barwidth=0.10,
):
data_by_subject = adata.obs.groupby("subject_ID").agg(
{
"age": "first",
"BMI": "first",
"ethnicity": "first",
"sex": "first",
"smoking_status": "first",
}
)
fig = plt.figure(
figsize=(figwidth, figheight),
constrained_layout=True,
)
gs = GridSpec(12, 12, figure=fig)
fig_count = 0
# FIGURE 1 AGE
fig_count += 1
ax = fig.add_subplot(gs[:6, :6])
bins = np.arange(0, max(adata.obs.age), 5)
tick_idc = np.arange(0, len(bins), 4)
perc_annotated = int(
np.round(
100 - (data_by_subject.age.isnull().sum() / data_by_subject.shape[0] * 100),
0,
)
)
ax.hist(data_by_subject.age, bins=bins, rwidth=0.9)
print(f"age: {perc_annotated}% annotated")
ax.set_xlabel("age", fontsize=fontsize)
ax.set_xticks(bins[tick_idc])
ax.tick_params(labelsize=fontsize, bottom=True, left=True)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
# FIGURE 2 BMI
fig_count += 1
ax = fig.add_subplot(gs[:6, -6:])
BMIs = data_by_subject.BMI.copy()
perc_annotated = int(round(100 - (BMIs.isna().sum() / len(BMIs) * 100)))
BMIs = BMIs[~BMIs.isna()]
bins = np.arange(np.floor(BMIs.min() / 2) * 2, BMIs.max(), 2)
tick_idc = np.arange(0, len(bins), 3)
ax.hist(data_by_subject.BMI, bins=bins, rwidth=0.9)
print(f"BMI: {perc_annotated}% annotated")
ax.set_xlabel("BMI", fontsize=fontsize)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xticks(bins[tick_idc])
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.tick_params(labelsize=fontsize, bottom=True, left=True)
ax.grid(False)
# FIGURE 3 SEX
fig_count += 1
ax = fig.add_subplot(gs[-6:, :3])
x_man = np.sum(data_by_subject.sex == "male")
x_woman = np.sum(data_by_subject.sex == "female")
perc_annotated = int(
np.round(
100
- sum([s == "nan" or pd.isnull(s) for s in data_by_subject.sex])
            / data_by_subject.shape[0]
* 100,
0,
)
)
ax.bar(
x=[0.25, 0.75],
tick_label=["male", "female"],
height=[x_man, x_woman],
width=barwidth * 5 / 3,
)
ax.set_xlim(left=0, right=1)
    print(f"sex: {perc_annotated}% annotated")
ax.tick_params("x", rotation=90, labelsize=fontsize, bottom=True, left=True)
ax.tick_params("y", labelsize=fontsize, bottom=True, left=True)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xlabel("sex", fontsize=fontsize)
ax.grid(False)
# FIGURE 4 ETHNICITY
fig_count += 1
ax = fig.add_subplot(gs[-6:, 3:-4])
ethns = data_by_subject.ethnicity.copy()
perc_annotated = int(
np.round(
100 - sum([e == "nan" or pd.isnull(e) for e in ethns]) / len(ethns) * 100, 0
)
)
ethns = ethns[ethns != "nan"]
ethn_freqs = ethns.value_counts()
n_bars = len(ethn_freqs)
ax.bar(
x=np.linspace(0 + 0.75 / n_bars, 1 - 0.75 / n_bars, n_bars),
tick_label=ethn_freqs.index,
height=ethn_freqs.values,
width=barwidth,
)
ax.set_xlim(left=0, right=1)
print(f"ethnicity {perc_annotated}% annotated")
# ax.set_xlabel("ethnicity")
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xlabel("ethnicity", fontsize=fontsize)
ax.tick_params("x", rotation=90, labelsize=fontsize, bottom=True)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.tick_params("y", labelsize=fontsize, left=True)
ax.grid(False)
# FIGURE SMOKING STATUS
fig_count += 1
ax = fig.add_subplot(gs[-6:, -4:])
smoks = data_by_subject["smoking_status"].copy()
perc_annotated = int(
np.round(
            100 - sum([s == "nan" or pd.isnull(s) for s in smoks]) / len(smoks) * 100, 0
        )
    )
"""
Name : c9_44_equal_weighted_vs_value_weighted.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import pandas as pd
import scipy as sp
x=pd.read_pickle("c:/temp/yanMonthly.pkl")
def ret_f(ticker):
a=x[x.index==ticker]
p=sp.array(a['VALUE'])
ddate=a['DATE'][1:]
ret=p[1:]/p[:-1]-1
    out1=pd.DataFrame(p[1:],index=ddate)
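    # Worked example of the simple-return formula above (illustrative numbers only):
    # with prices p = [10.0, 11.0, 12.1], ret = p[1:]/p[:-1] - 1 = [11/10 - 1, 12.1/11 - 1] = [0.10, 0.10],
    # i.e. a 10% return in each period.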
from __future__ import print_function
import collections
import os
import re
import sys
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', 'utils'))
sys.path.append(lib_path)
from data_utils import get_file
global_cache = {}
SEED = 2017
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
def impute_and_scale(df, scaling='std'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = df.dropna(axis=1, how='all')
imputer = Imputer(strategy='mean', axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
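# Minimal sketch of impute_and_scale on a toy frame (toy data invented for illustration only):
#   toy = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [0.5, 1.5, np.nan]})
#   impute_and_scale(toy, scaling='minmax')  # NaNs -> column means, then each column scaled to [0, 1]
#   impute_and_scale(toy, scaling=None)      # imputation only, no scaling applied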
def describe_response_data(df, cells=['all'], drugs=['A'], doses=[-5, -4]):
if 'all' in cells or cells == 'all':
cells = all_cells()
if 'all' in drugs or drugs == 'all':
drugs = all_drugs()
elif len(drugs) == 1 and re.match("^[ABC]$", drugs[0].upper()):
drugs = drugs_in_set('Jason:' + drugs[0].upper())
print('cells:', cells)
print('drugs:', drugs)
lconc = -4
for cell in cells:
d = df[(df['CELLNAME'] == cell) & (df['LOG_CONCENTRATION'] == lconc)]
print(cell)
print(d.describe())
break
def load_dose_response(min_logconc=-4., max_logconc=-4., subsample=None, fraction=False):
"""Load cell line response to different drug compounds, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
min_logconc : -3, -4, -5, -6, -7, optional (default -4)
min log concentration of drug to return cell line growth
max_logconc : -3, -4, -5, -6, -7, optional (default -4)
max log concentration of drug to return cell line growth
subsample: None, 'naive_balancing' (default None)
subsampling strategy to use to balance the data based on growth
fraction: bool (default False)
divide growth percentage by 100
"""
path = get_file(P1B3_URL + 'NCI60_dose_response_with_missing_z5_avg.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
dtype={'NSC':object, 'CELLNAME':str, 'LOG_CONCENTRATION':np.float32, 'GROWTH':np.float32})
global_cache[path] = df
df = df[(df['LOG_CONCENTRATION'] >= min_logconc) & (df['LOG_CONCENTRATION'] <= max_logconc)]
df = df[['NSC', 'CELLNAME', 'GROWTH', 'LOG_CONCENTRATION']]
if subsample and subsample == 'naive_balancing':
df1 = df[df['GROWTH'] <= 0]
df2 = df[(df['GROWTH'] > 0) & (df['GROWTH'] < 50)].sample(frac=0.7, random_state=SEED)
df3 = df[(df['GROWTH'] >= 50) & (df['GROWTH'] <= 100)].sample(frac=0.18, random_state=SEED)
df4 = df[df['GROWTH'] > 100].sample(frac=0.01, random_state=SEED)
df = pd.concat([df1, df2, df3, df4])
if fraction:
df['GROWTH'] /= 100
df = df.set_index(['NSC'])
return df
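# Usage sketch (downloads and caches the NCI60 response file on first call):
#   resp = load_dose_response(min_logconc=-5., max_logconc=-4., subsample='naive_balancing', fraction=True)
#   # resp is indexed by NSC drug id, with CELLNAME, GROWTH (as a fraction) and LOG_CONCENTRATION columns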
def load_drug_descriptors(ncols=None, scaling='std', add_prefix=True):
    """Load drug descriptor data, sub-select columns of drug descriptors
    randomly if specified, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
ncols : int or None
        number of columns (drug descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'descriptors.2D-NSC.5dose.filtered.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''],
dtype=np.float32)
global_cache[path] = df
df1 = pd.DataFrame(df.loc[:,'NAME'].astype(int).astype(str))
df1.rename(columns={'NAME': 'NSC'}, inplace=True)
df2 = df.drop('NAME', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:,usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
def load_cell_expression_u133p2(ncols=None, scaling='std', add_prefix=True):
"""Load U133_Plus2 cell line expression data prepared by Judith,
    sub-select columns of gene expression randomly if specified,
scale the selected data and return a pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (gene expression) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file('http://bioseed.mcs.anl.gov/~fangfang/p1h/GSE32474_U133Plus2_GCRMA_gene_median.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c')
global_cache[path] = df
df1 = df['CELLNAME']
df2 = df.drop('CELLNAME', 1)
if add_prefix:
df2 = df2.add_prefix('expr.')
total = df.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cell_expression_5platform(ncols=None, scaling='std', add_prefix=True):
"""Load 5-platform averaged cell line expression data, sub-select
    columns of gene expression randomly if specified, scale the
selected data and return a pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (gene expression) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'RNA_5_Platform_Gene_Transcript_Averaged_intensities.transposed.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
global_cache[path] = df
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
if add_prefix:
df2 = df2.add_prefix('expr_5p.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cell_mirna(ncols=None, scaling='std', add_prefix=True):
"""Load cell line microRNA data, sub-select columns randomly if
    specified, scale the selected data and return a pandas
dataframe.
Parameters
----------
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'RNA__microRNA_OSU_V3_chip_log2.transposed.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
global_cache[path] = df
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
if add_prefix:
df2 = df2.add_prefix('mRNA.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cell_proteome(ncols=None, scaling='std', add_prefix=True):
"""Load cell line microRNA data, sub-select columns randomly if
    specified, scale the selected data and return a pandas
dataframe.
Parameters
----------
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path1 = get_file(P1B3_URL + 'nci60_proteome_log2.transposed.tsv')
path2 = get_file(P1B3_URL + 'nci60_kinome_log2.transposed.tsv')
df = global_cache.get(path1)
if df is None:
df = pd.read_csv(path1, sep='\t', engine='c')
global_cache[path1] = df
df_k = global_cache.get(path2)
if df_k is None:
df_k = pd.read_csv(path2, sep='\t', engine='c')
global_cache[path2] = df_k
df = df.set_index('CellLine')
df_k = df_k.set_index('CellLine')
if add_prefix:
df = df.add_prefix('prot.')
df_k = df_k.add_prefix('kino.')
else:
df_k = df_k.add_suffix('.K')
df = df.merge(df_k, left_index=True, right_index=True)
index = df.index.map(lambda x: x.replace('.', ':'))
total = df.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df = df.iloc[:, usecols]
df = impute_and_scale(df, scaling)
df = df.astype(np.float32)
df.index = index
df.index.names = ['CELLNAME']
df = df.reset_index()
return df
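# Usage sketch: the proteome and kinome tables are merged per cell line before imputation/scaling;
# with add_prefix=True the features are namespaced as 'prot.*' and 'kino.*'.
#   prot = load_cell_proteome(ncols=500, scaling='maxabs')
#   # 'CELLNAME' column plus up to 500 randomly chosen, imputed, [-1, 1]-scaled feature columns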
def load_drug_autoencoded_AG(ncols=None, scaling='std', add_prefix=True):
"""Load drug latent representation from Aspuru-Guzik's variational
    autoencoder, sub-select columns of drugs randomly if specified,
impute and scale the selected data, and return a pandas dataframe
Parameters
----------
ncols : int or None
number of columns (drug latent representations) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'Aspuru-Guzik_NSC_latent_representation_292D.csv')
df = global_cache.get(path)
if df is None:
        df = pd.read_csv(path, engine='c', dtype=np.float32)
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
    # Functions that yield mismatching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
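# Sketch of what these factories produce (illustrative): _make_func_use_binop1('+') builds the
# source below, exec's it, and returns the compiled test_impl used by the parametrized tests:
#   def test_impl(A, B):
#       return A + B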
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
# cannot compare NaN != NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() any parameters unsupported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() any parameters unsupported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
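# Pass only this rank's chunk to the jitted function; the REP/parfor counters below
# verify that the distribution pass kept the data distributed (no replication).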
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_assert1(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3])
S2 = pd.Series([6., 21., 3., 5.])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_assert2(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6., 21., 3., 5.])
S2 = pd.Series([1, 2, 3])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_integer(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 16)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 3, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_different_types(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
S2 = pd.Series([1, 2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_integer_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 17, -5, 4])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_apply1(self):
def test_impl(S):
return S.apply(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_abs1(self):
def test_impl(S):
return S.abs()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, -2., 3., 0.5E-01, 0xFF, 0o7, 0b101])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_cov1(self):
def test_impl(S1, S2):
return S1.cov(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_corr1(self):
def test_impl(S1, S2):
return S1.corr(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_str_len1(self):
def test_impl(S):
return S.str.len()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'abc', 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str2str(self):
str2str_methods = ('capitalize', 'lower', 'lstrip', 'rstrip',
'strip', 'swapcase', 'title', 'upper')
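# Generate the test body for each string method as source text, compile it with the
# _make_func_from_text helper and JIT the result, so one loop covers all methods.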
for method in str2str_methods:
func_text = "def test_impl(S):\n"
func_text += " return S.str.{}()\n".format(method)
test_impl = _make_func_from_text(func_text)
hpat_func = hpat.jit(test_impl)
S = pd.Series([' \tbbCD\t ', 'ABC', ' mCDm\t', 'abc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_append1(self):
def test_impl(S, other):
return S.append(other).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
# Test single series
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_append2(self):
def test_impl(S1, S2, S3):
return S1.append([S2, S3]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
S3 = pd.Series([1.0])
# Test series tuple
np.testing.assert_array_equal(hpat_func(S1, S2, S3),
test_impl(S1, S2, S3))
def test_series_isin_list1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = [1, 2, 5, 7, 8]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = [1., 2., 5., 7., 8.]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'q', 'w', 'c', 'd', 'e', 'r'])
values = ['a', 'q', 'c', 'd', 'e']
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = {1, 2, 5, 7, 8}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = {1., 2., 5., 7., 8.}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
@unittest.skip('TODO: requires hashable unicode strings in Numba')
def test_series_isin_set3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'c', 'd', 'e'] * 2)
values = {'b', 'c', 'e'}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3., np.inf])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull1(self):
def test_impl(S):
return S.isnull()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull_full(self):
def test_impl(series):
return series.isnull()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_numeric + [test_global_input_data_unicode_kind4]:
series = pd.Series(data * 3)
ref_result = test_impl(series)
jit_result = hpat_func(series)
pd.testing.assert_series_equal(ref_result, jit_result)
def test_series_notna1(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_notna_noidx_float(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_int(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_num(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_notna_noidx_str(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_str_notna(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different')
def test_series_dt_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_nlargest1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_default1(self):
def test_impl(S):
return S.nlargest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_nan1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_str(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_int(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=[2, 3, 4, 5, 6])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_default1(self):
def test_impl(S):
return S.nsmallest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_nan1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_str(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_int(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=[1, 2, 3, 4, 5])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_head1(self):
def test_impl(S):
return S.head(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_default1(self):
'''Verifies default head method for non-distributed pass of Series with no index'''
def test_impl(S):
return S.head()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_index1(self):
'''Verifies head method for Series with integer index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index2(self):
'''Verifies head method for Series with string index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index3(self):
'''Verifies head method for non-distributed pass of Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip("Passed if run single")
def test_series_head_index4(self):
'''Verifies head method for non-distributed pass of Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 4, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_parallel1(self):
'''Verifies head method for distributed Series with string data and no index'''
def test_impl(S):
return S.head(7)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
# need to test different lengths, as head's size is fixed and implementation
# depends on relation of size of the data per processor to output data size
for n in range(1, 5):
S = pd.Series(['a', 'ab', 'abc', 'c', 'f', 'hh', ''] * n)
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_index_parallel1(self):
'''Verifies head method for distributed Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip("Passed if run single")
def test_series_head_index_parallel2(self):
'''Verifies head method for distributed Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_noidx_float(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_int(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_num(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Old implementation not work with n negative and data str")
def test_series_head_noidx_str(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Broke another three tests")
def test_series_head_idx(self):
def test_impl(S):
return S.head()
def test_impl_param(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
['as', 'b', 'abb', 'sss', 'ytr65', '', 'qw', 'a', 'b'],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
pd.testing.assert_series_equal(result, result_ref)
hpat_func_param1 = hpat.jit(test_impl_param)
for param1 in [1, 3, 7]:
result_param1_ref = test_impl_param(S, param1)
result_param1 = hpat_func_param1(S, param1)
pd.testing.assert_series_equal(result_param1, result_param1_ref)
def test_series_median1(self):
'''Verifies median implementation for float and integer series of random data'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
# odd size
m = 101
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"BUG: old-style median implementation doesn't filter NaNs")
def test_series_median_skipna_default1(self):
'''Verifies median implementation with default skipna=True argument on a series with NA values'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"Skipna argument is not supported in old-style")
def test_series_median_skipna_false1(self):
'''Verifies median implementation with skipna=False on a series with NA values'''
def test_impl(S):
return S.median(skipna=False)
hpat_func = hpat.jit(test_impl)
# np.inf is not NaN, so verify that a correct number is returned
S1 = pd.Series([2., 3., 5., np.inf, 5., 6., 7.])
self.assertEqual(hpat_func(S1), test_impl(S1))
# TODO: both return values are 'nan', but HPAT's is not np.nan, hence checking with
# assertIs() doesn't work - check if it's Numba related
S2 = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(np.isnan(hpat_func(S2)), np.isnan(test_impl(S2)))
def test_series_median_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.median()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
def test_series_argsort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.argsort().values
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_idxmin1(self):
def test_impl(A):
return A.idxmin()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_idxmin_str(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmin_str_idx(self):
def test_impl(S):
return S.idxmin(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_no(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_int(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3], [4, 45, 14])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_noidx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmin_idx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, -np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_idxmax1(self):
def test_impl(A):
return A.idxmax()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmax_str_idx(self):
def test_impl(S):
return S.idxmax(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmax_noidx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmax_idx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.nan, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_sort_values1(self):
def test_impl(A):
return A.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_sort_values_index1(self):
def test_impl(A, B):
S = pd.Series(A, B)
return S.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
# TODO: support passing Series with Index
# S = pd.Series(np.random.ranf(n), np.random.randint(0, 100, n))
A = np.random.ranf(n)
B = np.random.ranf(n)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_sort_values_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.sort_values()
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_shift(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.shift()
cfunc = hpat.jit(pyfunc)
pd.testing.assert_series_equal(cfunc(), pyfunc())
def test_series_shift_unboxing(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_float64:
series = pd.Series(data)
pd.testing.assert_series_equal(cfunc(series), pyfunc(series))
def test_series_shift_full(self):
def pyfunc(series, periods, freq, axis, fill_value):
return series.shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
freq = None
axis = 0
for data in test_global_input_data_float64:
series = pd.Series(data)
for periods in [-2, 0, 3]:
for fill_value in [9.1, np.nan, -3.3, None]:
jit_result = cfunc(series, periods, freq, axis, fill_value)
ref_result = pyfunc(series, periods, freq, axis, fill_value)
pd.testing.assert_series_equal(jit_result, ref_result)
def test_series_shift_str(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method shift(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_fill_str(self):
def pyfunc(series, fill_value):
return series.shift(fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, fill_value='unicode')
msg = 'Method shift(). The object must be a number. Given fill_value: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_unsupported_params(self):
def pyfunc(series, freq, axis):
return series.shift(freq=freq, axis=axis)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, freq='12H', axis=0)
msg = 'Method shift(). Unsupported parameters. Given freq: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, freq=None, axis=1)
msg = 'Method shift(). Unsupported parameters. Given axis != 0'
self.assertIn(msg, str(raises.exception))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_str(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_int(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=[1, 2, 3, 4, 5, 6, 7])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_index1(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=[0, 1, 2])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_take_index_default(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_default_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_iterator_int(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([3, 2, 1, 5, 4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_float(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([0.3, 0.2222, 0.1756, 0.005, 0.4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_boolean(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([True, False])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_string(self):
def test_impl(A):
return [i for i in A]
A = pd.Series(['a', 'ab', 'abc', '', 'dddd'])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_one_value(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([5])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
@unittest.skip("Fails when NUMA_PES>=2 due to unimplemented sync of such construction after distribution")
def test_series_iterator_no_param(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return [i for i in A]
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_iterator_empty(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([np.int64(x) for x in range(0)])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_default_index(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
@unittest.skip("Implement drop_duplicates for Series")
def test_series_drop_duplicates(self):
def test_impl():
A = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
return A.drop_duplicates()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_quantile(self):
def test_impl():
A = pd.Series([1, 2.5, .5, 3, 5])
return A.quantile()
hpat_func = hpat.jit(test_impl)
np.testing.assert_equal(hpat_func(), test_impl())
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.quantile() parameter as a list unsupported")
def test_series_quantile_q_vector(self):
def test_series_quantile_q_vector_impl(S, param1):
return S.quantile(param1)
S = pd.Series(np.random.ranf(100))
hpat_func = hpat.jit(test_series_quantile_q_vector_impl)
param1 = [0.0, 0.25, 0.5, 0.75, 1.0]
result_ref = test_series_quantile_q_vector_impl(S, param1)
result = hpat_func(S, param1)
np.testing.assert_equal(result, result_ref)
@unittest.skip("Implement unique without sorting like in pandas")
def test_unique(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2, 1, 3, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_unique_sorted(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
S[2] = 0
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_unique_str(self):
def test_impl():
data = pd.Series(['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'])
return data.unique()
hpat_func = hpat.jit(test_impl)
# since the order of the elements is different - check only the count of elements
ref_result = test_impl().size
result = hpat_func().size
np.testing.assert_array_equal(ref_result, result)
def test_series_groupby_count(self):
def test_impl():
A = pd.Series([13, 11, 21, 13, 13, 51, 42, 21])
grouped = A.groupby(A, sort=False)
return grouped.count()
hpat_func = hpat.jit(test_impl)
ref_result = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, ref_result)
@unittest.skip("getiter for this type is not implemented yet")
def test_series_groupby_iterator_int(self):
def test_impl():
A = pd.Series([13, 11, 21, 13, 13, 51, 42, 21])
grouped = A.groupby(A)
return [i for i in grouped]
hpat_func = hpat.jit(test_impl)
ref_result = test_impl()
result = hpat_func()
np.testing.assert_array_equal(result, ref_result)
def test_series_std(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.std()
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
np.testing.assert_equal(ref_result, result)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() parameters "skipna" and "ddof" unsupported')
def test_series_std_unboxing(self):
def pyfunc(series, skipna, ddof):
return series.std(skipna=skipna, ddof=ddof)
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
for ddof in [0, 1]:
for skipna in [True, False]:
ref_result = pyfunc(series, skipna=skipna, ddof=ddof)
result = cfunc(series, skipna=skipna, ddof=ddof)
np.testing.assert_equal(ref_result, result)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() strings as input data unsupported')
def test_series_std_str(self):
def pyfunc(series):
return series.std()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method std(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() parameters "axis", "level", "numeric_only" unsupported')
def test_series_std_unsupported_params(self):
def pyfunc(series, axis, level, numeric_only):
return series.std(axis=axis, level=level, numeric_only=numeric_only)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
msg = 'Method std(). Unsupported parameters. Given {}: {}'
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=1, level=None, numeric_only=None)
self.assertIn(msg.format('axis', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=1, numeric_only=None)
self.assertIn(msg.format('level', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=None, numeric_only=True)
self.assertIn(msg.format('numeric_only', 'bool'), str(raises.exception))
def test_series_nunique(self):
def test_series_nunique_impl(S):
return S.nunique()
def test_series_nunique_param1_impl(S, dropna):
return S.nunique(dropna)
hpat_func = hpat.jit(test_series_nunique_impl)
the_same_string = "the same string"
test_input_data = []
data_simple = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'],
['aa', 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd', 'copy aa', 'copy aa'],
[]
]
data_extra = [[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
# unsupported ['aa', np.nan, 'b', 'b', 'cccc', np.nan, 'ddd', 'dd'],
# unsupported [np.nan, 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd', 'copy aa', 'copy aa'],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
if hpat.config.config_pipeline_hpat_default:
"""
HPAT pipeline Series.nunique() does not support numpy.nan
"""
test_input_data = data_simple
else:
test_input_data = data_simple + data_extra
for input_data in test_input_data:
S = pd.Series(input_data)
result_ref = test_series_nunique_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
if not hpat.config.config_pipeline_hpat_default:
"""
HPAT pipeline does not support parameter to Series.nunique(dropna=True)
"""
hpat_func_param1 = hpat.jit(test_series_nunique_param1_impl)
for param1 in [True, False]:
result_param1_ref = test_series_nunique_param1_impl(S, param1)
result_param1 = hpat_func_param1(S, param1)
self.assertEqual(result_param1, result_param1_ref)
def test_series_var(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.var()
cfunc = hpat.jit(pyfunc)
np.testing.assert_equal(pyfunc(), cfunc())
def test_series_var_unboxing(self):
def pyfunc(series):
return series.var()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
np.testing.assert_equal(pyfunc(series), cfunc(series))
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as matplotlib
import numpy as np
import math as math
import random as rand
import os, sys, csv
import pandas as pd
#matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2)
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86', '#5443a3']
# red, orange, yellow, green, blue, purple
dW1, dW2, dW3 = 0, 0, 0
np.random.seed() #42
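# lif_euler: one forward-Euler step for two leaky integrate-and-fire neurons coupled
# diffusively with strength gamma: dv_i/dt = -v_i + gamma*(v_j - v_i) + I_i.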
def lif_euler(dt, v1, v2, I1, I2):
return [v1 + dt*(-v1 + gamma*(v2-v1) + I1) , v2 + dt*(-v2 + gamma*(v1-v2) + I2) ]
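# lif_euler_stoch: Euler-Maruyama step with additive noise. dW1 and dW2 are independent
# per-neuron increments, dW3 is a common increment added to both neurons, so sigma3
# controls the shared (correlated) part of the input fluctuations.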
def lif_euler_stoch(dt, v1, v2, I1, I2, s1, s2, s3):
global dW1, dW2, dW3
dW1 = s1*math.sqrt(dt)*np.random.randn()
dW2 = s2*math.sqrt(dt)*np.random.randn()
dW3 = s3*math.sqrt(dt)*np.random.randn()
return [v1 + dt*(-v1 + gamma*(v2-v1) + I1) + dW1 + dW3, v2 + dt*(-v2 + gamma*(v1-v2) + I2) + dW2 + dW3]
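# correlations: for each trial, integrate the pair deterministically from fixed initial
# conditions, switch to the stochastic update for a short pulse window after the 10th
# spike, then estimate the phase difference phi from the spike times found after Dtime.
# Illustrative call (assumed usage): phis_demo = correlations(0.0, 0.0, 0.2, nb_iterations=10)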
def correlations(sigma1, sigma2, sigma3, nb_iterations=1000) :
phis = []
for k in range(nb_iterations) :
#v1_0, v2_0 = 0.7611728117817528, 0.1654125684129333 # Used XPPAUT to find ideal initial conditions s.t. we begin in antiphase with I = 1.4
v1_0, v2_0 = 0.3764002759711251, 0.8546679415731656
x1, x2 = [v1_0], [v2_0]
t = [0]
nb_spikes = 0
I_baseline = 1.5
I1, I2 = [I_baseline], [I_baseline]
pulse_start, pulse_duration = 0, 0.2
begin_pulse = True
while t[-1] < maxtime :
t.append(t[-1]+dt)
if nb_spikes == 10 and begin_pulse :
pulse_start = t[-1]
begin_pulse = False
if nb_spikes >= 10 and t[-1] < pulse_start + pulse_duration :
next_values= lif_euler_stoch(dt, x1[-1], x2[-1], I1[-1], I2[-1], sigma1, sigma2, sigma3)
I1.append(I_baseline + (dW1+dW3)/dt)
I2.append(I_baseline + (dW2+dW3)/dt)
else :
I1.append(I_baseline)
I2.append(I_baseline)
next_values = lif_euler(dt, x1[-1], x2[-1], I1[-1], I2[-1])
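# Threshold crossing: a neuron reaching Vth is reset to Vr and its partner receives an
# extra jump of gamma*beta, being reset as well if that pushes it above threshold.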
if next_values[0] > 1 :
x1.append(0)
nb_spikes += 1
if next_values[1] + gamma*beta > 1 :
x2.append(0)
else :
x2.append(next_values[1]+gamma*beta)
elif next_values[1] > 1 :
x2.append(0)
if next_values[0] + gamma*beta > 1 :
x1.append(0)
else :
x1.append(next_values[0]+gamma*beta)
else :
x1.append(next_values[0])
x2.append(next_values[1])
# Spike times
spike_times, k = [], 0
for i in range(1, len(t)) :
if abs(x1[i]-x1[i-1]) > (Vth-Vr)/2 and t[i] >= Dtime :
spike_times.append(t[i])
k = i
break
for i in range(k, len(t)) :
if abs(x2[i]-x2[i-1]) > (Vth-Vr)/2 :
spike_times.append(t[i])
k = i
break
        for i in range(k, len(t)-1) :
if abs(x1[i+1]-x1[i]) > (Vth-Vr)/2 :
spike_times.append(t[i])
break
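        # Phase: delay from the neuron-2 spike to the next neuron-1 spike, normalised by the
        # neuron-1 inter-spike interval (values near 0 or 1 ~ synchrony, near 0.5 ~ antiphase).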
phis.append((spike_times[2] - spike_times[1])/(spike_times[2] - spike_times[0]))
"""
# Plot trials
fig, ax = plt.subplots(2, 1, figsize=(12,5), sharey='row')
ax[1].plot(t, x1, label='$V_{1}$', color='#aa3863')
ax[1].plot(t, x2, label='$V_{2}$', color='#3b7d86')
ax[0].plot(t, I1, label='$I_1$')
ax[0].plot(t, I2, label='$I_2$')
ax[0].legend(loc='upper right')
ax[1].legend(loc='upper right')
ax[0].set_title('Noisy input current trial, $\sigma=0.0025, I_{base}=1.5, \gamma=0.4, \\beta=0.1$')
#plt.savefig('trial_example_.png', dpi=600)
plt.show()
"""
phis = np.array(phis) % 1
print("phis ", phis)
return phis
gamma, beta = 0.4, 0.1
Vth, Vr = 1, 0
dt = 0.001
Dtime = 75
maxtime = 80
# CORRELATED
sigma_corr = [[0., 0., 0.1], [0., 0., 0.15], [0., 0., 0.2], [0., 0., 0.25], [0., 0., 0.3], [0., 0., 0.4]]
phis1_corr = correlations(sigma_corr[0][0], sigma_corr[0][1], sigma_corr[0][2])
phis2_corr = correlations(sigma_corr[1][0], sigma_corr[1][1], sigma_corr[1][2])
phis3_corr = correlations(sigma_corr[2][0], sigma_corr[2][1], sigma_corr[2][2])
phis4_corr = correlations(sigma_corr[3][0], sigma_corr[3][1], sigma_corr[3][2])
phis5_corr = correlations(sigma_corr[4][0], sigma_corr[4][1], sigma_corr[4][2])
phis6_corr = correlations(sigma_corr[5][0], sigma_corr[5][1], sigma_corr[5][2])
# Generate data on phase differences
phis1_corr = pd.Series(phis1_corr)
phis2_corr = pd.Series(phis2_corr)
phis3_corr = pd.Series(phis3_corr)
phis4_corr = pd.Series(phis4_corr)
phis5_corr = pd.Series(phis5_corr)
phis6_corr = pd.Series(phis6_corr)
# UNCORRELATED
sigma_uncorr = [[0.1, 0.1, 0.], [0.15, 0.15, 0.], [0.2, 0.2, 0.], [0.25, 0.25, 0.], [0.3, 0.3, 0.], [0.4, 0.4, 0.]]
phis1_uncorr = correlations(sigma_uncorr[0][0], sigma_uncorr[0][1], sigma_uncorr[0][2])
phis2_uncorr = correlations(sigma_uncorr[1][0], sigma_uncorr[1][1], sigma_uncorr[1][2])
phis3_uncorr = correlations(sigma_uncorr[2][0], sigma_uncorr[2][1], sigma_uncorr[2][2])
phis4_uncorr = correlations(sigma_uncorr[3][0], sigma_uncorr[3][1], sigma_uncorr[3][2])
phis5_uncorr = correlations(sigma_uncorr[4][0], sigma_uncorr[4][1], sigma_uncorr[4][2])
phis6_uncorr = correlations(sigma_uncorr[5][0], sigma_uncorr[5][1], sigma_uncorr[5][2])
# Generate data on phase differences
phis1_uncorr = pd.Series(phis1_uncorr)
phis2_uncorr = pd.Series(phis2_uncorr)
phis3_uncorr = pd.Series(phis3_uncorr)
phis4_uncorr = | pd.Series(phis4_uncorr) | pandas.Series |
from typing import Iterable, Optional
import pandas as pd
import numpy as np
from scipy.special import expit
def get_expanded_df(df, event_type_col='J', duration_col='X', pid_col='pid'):
"""
    This function gets a dataframe describing, for each sample, the time and type of the observed event,
and returns an expanded dataframe as explained in TODO add reference
Right censoring is allowed and must be marked as event type 0.
:param df: original dataframe (pd.DataFrame)
:param event_type_col: event type column name (str)
:param duration_col: time column name (str)
:param pid_col: patient id column name (str)
:return: result_df: expanded dataframe
"""
    # todo: consider dealing with highly non-continuous cases
unique_times = df[duration_col].sort_values().unique()
result_df = df.reindex(df.index.repeat(df[duration_col]))
result_df[duration_col] = result_df.groupby(pid_col).cumcount() + 1
# drop times that didn't happen
result_df.drop(index=result_df.loc[~result_df[duration_col].isin(unique_times)].index, inplace=True)
result_df.reset_index(drop=True, inplace=True)
last_idx = result_df.drop_duplicates(subset=[pid_col], keep='last').index
events = sorted(df[event_type_col].unique())
result_df.loc[last_idx, [f'j_{e}' for e in events]] = pd.get_dummies(
result_df.loc[last_idx, event_type_col]).values
result_df[[f'j_{e}' for e in events]] = result_df[[f'j_{e}' for e in events]].fillna(0)
result_df[f'j_0'] = 1 - result_df[[f'j_{e}' for e in events if e > 0]].sum(axis=1)
return result_df
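# Hedged usage sketch (illustration only, not part of the original module): the helper name and
# toy values below are assumptions; it just shows what get_expanded_df produces with the defaults.
def _expanded_df_example():
    toy = pd.DataFrame({'pid': [1, 2], 'X': [2, 3], 'J': [1, 0]})
    expanded = get_expanded_df(toy)
    # Each subject gets one row per observed event time t <= its own X; its event indicator is
    # set on the final row, while earlier rows keep j_0 = 1 (no event yet).
    return expanded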
def compare_models_coef_per_event(first_model: pd.Series,
second_model: pd.Series,
real_values: np.array,
event: int,
first_model_label:str = "first",
second_model_label:str = "second"
) -> pd.DataFrame:
"""
    Args:
        first_model (pd.Series): coefficient estimates from the first model
        second_model (pd.Series): coefficient estimates from the second model (must share the first model's index)
        real_values (np.array): true coefficient values, ordered like the shared index
        event (int): event type whose coefficients are being compared
        first_model_label (str): column name used for the first model
        second_model_label (str): column name used for the second model
    Returns:
        pd.DataFrame: one row per coefficient, holding both estimates and the real value
"""
event_suffix = f"_{event}"
assert (first_model.index == second_model.index).all(), "All index should be the same"
models = pd.concat([first_model.to_frame(first_model_label),
second_model.to_frame(second_model_label)], axis=1)
models.index += event_suffix
real_values_s = pd.Series(real_values, index=models.index)
return pd.concat([models, real_values_s.to_frame("real")], axis=1)
#todo: move from here
def present_coefs(res_dict):
from IPython.display import display
for coef_type, events_dict in res_dict.items():
print(f"for coef: {coef_type.capitalize()}")
df = pd.concat([temp_df for temp_df in events_dict.values()])
display(df)
def get_real_hazard(df, real_coef_dict, times, events):
"""
    Args:
        df (pd.DataFrame): covariates dataframe; hazard columns are added to it
        real_coef_dict (dict): {'alpha': {event: callable of t}, 'beta': {event: coefficient vector}}
        times (iterable): discrete time points
        events (iterable): event types
    Returns:
        pd.DataFrame: df with a column hazard_j{event}_t{time} = expit(alpha_event(t) + Z @ beta_event) per event and time
"""
# todo docstrings
# todo assertions
# todo move to utils?
a_t = {event: {t: real_coef_dict['alpha'][event](t) for t in times} for event in events}
b = pd.concat([df.dot(real_coef_dict['beta'][j]) for j in events], axis=1, keys=events)
for j in events:
df[[f'hazard_j{j}_t{t}' for t in times]] = pd.concat([expit(a_t[j][t] + b[j]) for t in times],
axis=1).values
return df
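# Hedged usage sketch (illustration only): the covariate names and coefficient values below are
# assumptions, chosen only to show the structure get_real_hazard expects for `real_coef_dict`.
def _real_hazard_example():
    covs = pd.DataFrame({'Z1': [0.1, 0.9], 'Z2': [1.0, -1.0]})
    toy_coef_dict = {
        'alpha': {1: lambda t: -1.0 - 0.1 * t},   # per-event baseline as a function of time
        'beta': {1: np.array([0.5, -0.5])},       # per-event covariate coefficients
    }
    # Adds columns hazard_j1_t1 and hazard_j1_t2 computed as expit(alpha_1(t) + Z @ beta_1)
    return get_real_hazard(covs, toy_coef_dict, times=[1, 2], events=[1])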
def assert_fit(event_df, times, event_type_col='J', duration_col='X'):
# todo: split to 2: one generic, one for new model
if not event_df['success'].all():
problematic_times = event_df.loc[~event_df['success'], duration_col].tolist()
event = event_df[event_type_col].max() # all the events in the dataframe are the same
raise RuntimeError(f"Number of observed events at some time points are too small. Consider collapsing neighbor time points."
f"\n See https://tomer1812.github.io/pydts/UsageExample-RegroupingData/ for more details.")
if event_df.shape[0] != len(times):
event = event_df[event_type_col].max() # all the events in the dataframe are the same
problematic_times = pd.Index(event_df[duration_col]).symmetric_difference(times).tolist()
raise RuntimeError(f"Number of observed events at some time points are too small. Consider collapsing neighbor time points."
f"\n See https://tomer1812.github.io/pydts/UsageExample-RegroupingData/ for more details.")
def create_df_for_cif_plots(df: pd.DataFrame, field: str,
covariates: Iterable,
vals: Optional[Iterable] = None,
quantiles: Optional[Iterable] = None,
zero_others: Optional[bool] = False
) -> pd.DataFrame:
"""
    This method creates a dataframe for CIF plotting: one record per value of `field`, with the other covariates held fixed.
    Args:
        df (pd.DataFrame): Dataframe from which the statistical properties (means, quantiles, etc.) and structure are taken
        field (str): The covariate that is varied across the returned records
        covariates (Iterable): The covariates of the given model
        vals (Optional[Iterable]): The values to use for the field
        quantiles (Optional[Iterable]): The quantiles to use as values for the field
        zero_others (bool): If True, set the other covariates to zero; otherwise impute them with their mean
    Returns:
        df (pd.DataFrame): A dataframe that contains one record per value, for CIF plotting
"""
cov_not_fitted = [cov for cov in covariates if cov not in df.columns]
assert len(cov_not_fitted) == 0, \
f"Required covariates are missing from df: {cov_not_fitted}"
# todo add assertions
df_for_ploting = df.copy() # todo make sure .copy() is required
if vals is not None:
pass
elif quantiles is not None:
vals = df_for_ploting[field].quantile(quantiles).values
else:
        raise NotImplementedError("Only quantiles or specific values are supported")
temp_series = []
template_s = df_for_ploting.iloc[0][covariates].copy()
if zero_others:
impute_val = 0
else:
impute_val = df_for_ploting[covariates].mean().values
for val in vals:
temp_s = template_s.copy()
temp_s[covariates] = impute_val
temp_s[field] = val
temp_series.append(temp_s)
return | pd.concat(temp_series, axis=1) | pandas.concat |
# coding: utf-8
# In[69]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import random
get_ipython().magic('matplotlib inline')
# In[70]:
PokemonDf=pd.read_csv('pokemon/Pokemon.csv')
# In[71]:
PokemonDf.head(200000)
# In[72]:
PokemonDf.describe()
PokemonDf.Name.value_counts()
# In[73]:
gx=PokemonDf['Generation'].value_counts().plot.bar(width=0.5)
for b in gx.patches:
gx.annotate(b.get_height()+1,(b.get_x(),b.get_height()+1))
# In[74]:
import plotly.offline as py
import plotly.graph_objs as go
import plotly.tools as tls
# In[75]:
Pokemon_all=pd.concat([PokemonDf['Attack'],PokemonDf['HP']])
Pokemon_all
# In[76]:
Pokemon_all= | pd.concat([PokemonDf['Attack'],PokemonDf['HP']]) | pandas.concat |
import jieba
import pandas as pd
import wordcloud
# Read the bullet-comment (danmu) txt file
with open("dan_mu.txt", encoding="utf-8") as f:
txt = f.read()
danmu_list = txt.split("\n")
# Tokenize with jieba
danmu_cut = [jieba.lcut(item) for item in danmu_list]
# Load stop words
with open("baidu_stopwords.txt",encoding="utf-8") as f:
stop = f.read()
stop_words = stop.split()
# Final words after removing stop words
s_data_cut = pd.Series(danmu_cut)
all_words_after = s_data_cut.apply(lambda x: [i for i in x if i not in stop_words])
# Word frequency counts
all_words = []
for i in all_words_after:
all_words.extend(i)
word_count = | pd.Series(all_words) | pandas.Series |
from typing import Union, Optional, List, Dict
import faiss
import pickle
import torch.distributed # noqa: WPS301
import numpy as np
import pandas as pd
from time import time
from pathlib import Path
from catalyst.dl import IRunner, CallbackOrder, Callback
from catalyst.utils.torch import get_activation_fn
from sklearn.metrics import accuracy_score
from src.utils.knn import build_benchmark_index, build_index, knn
from src.datasets.cico import \
INPUT_FILENAME_KEY, INPUT_TARGET_KEY, INPUT_LABEL_KEY, \
OUTPUT_TARGET_KEY, OUTPUT_EMBEDDINGS_KEY
ALL_KEYS = [
INPUT_FILENAME_KEY, INPUT_TARGET_KEY, INPUT_LABEL_KEY,
OUTPUT_TARGET_KEY, OUTPUT_EMBEDDINGS_KEY,
]
XLS_KEYS = [
INPUT_FILENAME_KEY, INPUT_TARGET_KEY, INPUT_LABEL_KEY,
]
PKL_KEYS = [
INPUT_FILENAME_KEY, INPUT_TARGET_KEY, INPUT_LABEL_KEY,
OUTPUT_EMBEDDINGS_KEY
]
class BenchmarkingCallback(Callback):
def __init__(
self,
target_key: str,
label_key: str,
filename_key: str,
embedding_key: str,
logit_key: str = None,
class_names: List[str] = None,
activation: Optional[str] = None,
enable_benchmark: bool = False,
benchmark_train_loader: str = None,
benchmark_test_loader: str = None,
benchmark_xlsx: Union[str, Path] = None,
enable_doev1: bool = False,
doev1_train_loaders: Union[str, List[str]] = None,
doev1_test_loaders: Union[str, List[str]] = None,
enable_doev2: bool = False,
doev2_train_loaders: Union[str, List[str]] = None,
doev2_test_loaders: Union[str, List[str]] = None,
doev2_xlsx: str = None,
save_dir: str = None,
save_loaders: Union[str, List[str]] = None,
):
super().__init__(CallbackOrder.Metric)
self.filename_key = filename_key
self.embedding_key = embedding_key
self.labels_key = label_key
self.target_key = target_key
self.logit_key = logit_key
self.save_dir = save_dir
self.class_names = {i: name for i, name in enumerate(class_names)}
self.class_ids = {name: i for i, name in enumerate(class_names)}
self.activation_fn = get_activation_fn(activation)
self.enable_benchmark = enable_benchmark
if enable_benchmark:
self.benchmark_train_loader = benchmark_train_loader
self.benchmark_test_loader = benchmark_test_loader
self.benchmark_index: Dict = build_benchmark_index(benchmark_xlsx)
self.enable_doev1 = enable_doev1
if enable_doev1:
self.doev1_train_loaders = doev1_train_loaders
self.doev1_test_loaders = doev1_test_loaders
self.enable_doev2 = enable_doev2
if enable_doev2:
self.doev2_train_loaders = doev2_train_loaders
self.doev2_test_loaders = doev2_test_loaders
self.doev2_dfs: Dict[str, pd.DataFrame] = \
| pd.read_excel(doev2_xlsx, sheet_name=None) | pandas.read_excel |
from ioUtils import getFile, saveFile
from timeUtils import clock, elapsed
from numpy import isnan
from pandas import DataFrame, Series
def testManualEntries(fast=False, saveit=False):
if fast:
start, cmt = clock("Testing Manual Entries Pickle File")
else:
start, cmt = clock("Testing Manual Entries YAML File")
dbNames = {k: True for k in ['Discogs', 'AllMusic', 'MusicBrainz', 'LastFM', 'RateYourMusic', 'Deezer', 'AlbumOfTheYear', 'Genius', 'KWorbSpotify', 'KWorbiTunes']}
##############################################################################################################################
#
# Load Manual/Update Entries
#
##############################################################################################################################
manualEntries = getManualEntriesData(fast=fast, local=False)
print("Saved Size = {0}".format(len(manualEntries)))
##############################################################################################################################
#
    # Save Global Artist Merge Map
#
##############################################################################################################################
globalArtistMap = {}
for key,value in manualEntries.items():
if value is None:
raise ValueError("Key is empty: {0}".format(key))
if value.get("ArtistName") is not None:
artistName = tuple(["ArtistName", value["ArtistName"]])
else:
raise ValueError("No ArtistName for Key: {0}".format(key))
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
db,dbID = val
assert dbNames[db] == True, "DB [{0}] for Key [{1}] is unknown!".format(db,key)
try:
int(dbID)
except:
raise ValueError("Error with {0}/{1}".format(key,val))
if globalArtistMap.get(val) is not None:
if globalArtistMap[val] != artistName:
raise ValueError("Multiple people are calling artist from the permanent file: {0}/{1}".format(artistName, val))
globalArtistMap[val] = artistName
##############################################################################################################################
#
# Load Values -> Keys Data
#
##############################################################################################################################
dbTuplesToKey = {}
for key,value in manualEntries.items():
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
if dbTuplesToKey.get(val) is None:
dbTuplesToKey[val] = key
else:
saveManualEntriesData(manualEntries, fast=False, local=False)
raise ValueError("Multiple Manual Matches For {0}/{1}".format(key,val))
print("\t\tNo Multiple Values In Manual Entries")
elapsed(start, cmt)
if fast is False and saveit is True:
saveManualEntriesData(manualEntries, fast=True)
def updateManualEntries(recreate=False, fast=False):
if fast:
start, cmt = clock("Updating Manual Entries Pickle File")
else:
start, cmt = clock("Updating Manual Entries YAML File")
from uuid import uuid4
##############################################################################################################################
#
# Recreate Manual Entries
#
##############################################################################################################################
if recreate is True:
manualEntries = getFile("mergers/manualEntries.yaml")
newManualEntries = {uuid4().hex: v for k,v in manualEntries.items()}
saveFile(idata=newManualEntries, ifile="mergers/manualEntries.yaml", debug=True)
##############################################################################################################################
#
# Load Manual/Update Entries
#
##############################################################################################################################
manualEntries = getManualEntriesData(fast=fast, local=False)
updates = getManualEntriesData(fast=fast, local=True)
print("Saved Size = {0}".format(len(manualEntries)))
print("Update Size = {0}".format(len(updates)))
##############################################################################################################################
#
# Save Global Artist Map
#
##############################################################################################################################
globalArtistMap = {}
err = False
for key,value in manualEntries.items():
if value.get("ArtistName") is not None:
artistName = tuple(["ArtistName", value["ArtistName"]])
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
if globalArtistMap.get(val) is not None:
if globalArtistMap[val] != artistName:
print("-"*100)
print("Multiple people are calling artist from the permanent file: {0}/{1}".format(artistName, val))
print("-"*100)
err = True
globalArtistMap[val] = artistName
if err is True:
raise ValueError("ERROR!!!")
##############################################################################################################################
#
# Load Values -> Keys Data
#
##############################################################################################################################
dbTuplesToKey = {}
err = False
for key,value in manualEntries.items():
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
if dbTuplesToKey.get(val) is None:
dbTuplesToKey[val] = key
else:
print("-"*100)
print("Multiple Manual Matches For {0}/{1}".format(key,val))
print("-"*100)
err = True
if err is True:
raise ValueError("ERROR!!!")
print("\t\tNo Multiple Values In Manual Entries")
##############################################################################################################################
#
# Save Global Artist Map
#
##############################################################################################################################
err = False
for key,value in updates.items():
if value.get("ArtistName") is not None:
artistName = tuple(["ArtistName", value["ArtistName"]])
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
if globalArtistMap.get(val) is not None:
if globalArtistMap[val] != artistName:
print("-"*100)
print("Multiple people are calling artist from the updates file: {0}/{1}".format(artistName, val))
print("-"*100)
err = True
globalArtistMap[val] = artistName
if err is True:
raise ValueError("ERROR!!!")
##############################################################################################################################
#
# Load Values -> Keys Data
#
##############################################################################################################################
preSize = len(dbTuplesToKey)
print("Pre Size = {0}".format(preSize))
err = False
for key,value in updates.items():
isKnown = []
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
if dbTuplesToKey.get(val) is not None:
isKnown.append(dbTuplesToKey[val])
isKnown = list(set(isKnown))
if len(isKnown) == 0:
#key = uuid4().hex
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
dbTuplesToKey[val] = key
elif len(isKnown) == 1:
key = isKnown[0]
for val in [tuple([db,dbID]) for db,dbID in value.items()]:
if val[0] == "ArtistName":
continue
if dbTuplesToKey.get(val) is None:
dbTuplesToKey[val] = key
else:
print("-"*100)
print("Not sure what to do with multiple key/values for {0}/{1}".format(key,value))
print("-"*100)
print("")
err = True
if err is True:
saveManualEntriesData(manualEntries, fast=False, local=False)
saveManualEntriesData(updates, fast=False, local=True)
raise ValueError("ERROR (Updates)!!!")
postSize = len(dbTuplesToKey)
print("Post Size = {0}".format(postSize))
if preSize == postSize:
print("Nothing changed after checking the updates. Returning...")
elapsed(start, cmt)
return
saveFile(idata=dbTuplesToKey, ifile='tmp.p', debug=True)
##############################################################################################################################
#
# Set Final Key,Values Data
#
##############################################################################################################################
newManualEntries = {}
err = False
for i,(val,key) in enumerate(dbTuplesToKey.items()):
if newManualEntries.get(key) is None:
newManualEntries[key] = {}
db,dbID = val[0],val[1]
if newManualEntries[key].get(db) is None:
newManualEntries[key][db] = dbID
else:
if dbID == newManualEntries[key][db]:
continue
print("New Key: {0}".format(key))
print("New Val: {0}".format(val))
print("Old Val: {0}".format(newManualEntries[key][db]))
print("")
print('-'*50)
print("Stored")
for val2,key2 in dbTuplesToKey.items():
if key2 == key:
print(val2)
print("-"*50)
try:
print("Perm: {0}".format(manualEntries[key]))
except:
pass
try:
print("Update: {0}".format(updates[key]))
except:
pass
print("-"*100)
print(i,'/',len(dbTuplesToKey))
print("Multiple db/dbID for Key: {0}, [{1}/{2}]".format(key,db,dbID))
print("-"*100)
print("")
err = True
if err is True:
saveManualEntriesData(manualEntries, fast=False, local=False)
saveManualEntriesData(updates, fast=False, local=True)
raise ValueError("ERROR!!!")
finalSize = len(newManualEntries)
print("Final Size = {0}".format(finalSize))
##############################################################################################################################
#
# Set Artist Name
#
##############################################################################################################################
for key in newManualEntries.keys():
vals = newManualEntries[key]
artistName = None
for val in [tuple([db,dbID]) for db,dbID in vals.items()]:
if artistName is None:
artistName = globalArtistMap[val]
elif globalArtistMap[val] != artistName:
saveManualEntriesData(manualEntries, fast=False, local=False)
saveManualEntriesData(updates, fast=False, local=True)
raise ValueError("Error with artist name {0}".format(artistName))
vals[artistName[0]] = artistName[1]
print("All is good. Saving {0} manual entries.".format(len(newManualEntries)))
saveManualEntriesData(newManualEntries, fast=fast, local=False)
#saveFile(idata=newManualEntries, ifile="mergers/manualEntries.yaml", debug=True)
elapsed(start, cmt)
def keepVal(k2,v2):
if k2 in ["NumAlbums", "NumMatches", "DBMatches", 'AvgAlbums', 'MaxAlbums']:
return False
elif k2 == "ArtistName":
return True
else:
if v2 is None:
return False
#print(k2,v2)
if isnan(float(v2)) == True:
return False
return True
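# Note (added for clarity): keepVal drops aggregate/bookkeeping columns and NaN IDs, so that
# saveTempManualEntries below only persists the ArtistName plus concrete per-database IDs.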
def saveTempManualEntries(tmp):
idata = tmp.T.to_dict()
idata2 = {k: {k2: v2 for k2,v2 in v.items() if keepVal(k2,v2) is True} for k,v in idata.items()}
if len(idata) > 0:
saveManualEntriesData(idata2, fast=True, local=True)
print("Size = {0}".format(len(idata2)))
def getManualEntries(fast=False):
raise ValueError("Do we really need to call this function!")
start, cmt = clock("Updating Manual Entries YAML File")
manualEntries = getManualEntriesData(fast=fast)
manualEntriesByDB = {}
for key,keyData in manualEntries.items():
for db,dbID in keyData.items():
if db in ["ArtistName"]:
continue
if manualEntriesByDB.get(db) is None:
manualEntriesByDB[db] = {}
if manualEntriesByDB[db].get(dbID) is None:
manualEntriesByDB[db][dbID] = {db2: dbID2 for db2,dbID2 in keyData.items() if db2 not in [""]}
for db,dbVals in manualEntriesByDB.items():
print("{0: <15}{1}".format(db,len(dbVals)))
mdbmaps = {}
for db,dbVals in manualEntriesByDB.items():
mdbmaps[db] = musicDBMap(db, init=True, copy=False)
if mdbmaps.get(db) is None:
continue
for dbID,dbIDMatches in dbVals.items():
try:
artistName = dbIDMatches["ArtistName"]
except:
raise ValueError("No ArtistName for {0}/{1}".format(db,dbVals))
primaryKey = mdbmaps[db].getPrimaryKey(artistName=artistName, artistID=dbID)
if mdbmaps[db].isKnownByKey(primaryKey) is False:
mdbmaps[db].addArtistByKey(primaryKey, artistName=artistName, artistID=dbID)
#print("Added Artist ({0}): {1}".format(db,artistName))
update = True
for db2,dbID2 in dbIDMatches.items():
if db2 == "ArtistName":
continue
try:
int(dbID2)
except:
raise ValueError("Problem with {0}/{1}/{2}".format(primaryKey, db2, dbID2))
mdbmaps[db].addArtistDataByKey(primaryKey, db=db2, dbID=dbID2)
update = True
return mdbmaps
def getFilename(fast, test, local, counts="multi"):
basename="manualEntries"
if test:
basename = "manualEntries.test"
if counts == "single" or counts == "singles":
basename = "manualEntries.singles"
if local is False:
fname = "mergers/{0}".format(basename)
else:
fname = basename
if fast:
fname = "{0}.p".format(fname)
else:
fname = "{0}.yaml".format(fname)
return fname
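# Hedged examples (illustration only): with the defaults, getFilename(fast=True, test=False, local=False)
# resolves to "mergers/manualEntries.p", while fast=False yields "mergers/manualEntries.yaml".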
def getManualEntriesData(fast=False, test=False, local=False, counts="multi"):
if fast:
start, cmt = clock("Getting Manual Entries Pickle File Data")
else:
start, cmt = clock("Getting Manual Entries YAML File Data")
fname = getFilename(fast=fast, test=test, local=local, counts=counts)
print("Loading data from {0}".format(fname))
manualEntries = getFile(fname)
elapsed(start, cmt)
return manualEntries
def createManualEntriesDataFrame(manualEntries):
start, cmt = clock("Getting Manual Entries DataFrame")
meDF = | DataFrame(manualEntries) | pandas.DataFrame |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import datetime as dt
import re
import cupy as cp
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.util.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
import cudf
from cudf.core import DataFrame, Series
from cudf.core.index import DatetimeIndex
from cudf.tests.utils import NUMERIC_TYPES, assert_eq
def data1():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def data2():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def timeseries_us_data():
return pd.date_range(
"2019-07-16 00:00:00",
"2019-07-16 00:00:01",
freq="5555us",
name="times",
)
def timestamp_ms_data():
return pd.Series(
[
"2019-07-16 00:00:00.333",
"2019-07-16 00:00:00.666",
"2019-07-16 00:00:00.888",
]
)
def timestamp_us_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333",
"2019-07-16 00:00:00.666666",
"2019-07-16 00:00:00.888888",
]
)
def timestamp_ns_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333333",
"2019-07-16 00:00:00.666666666",
"2019-07-16 00:00:00.888888888",
]
)
def numerical_data():
return np.arange(1, 10)
fields = ["year", "month", "day", "hour", "minute", "second", "weekday"]
@pytest.mark.parametrize("data", [data1(), data2()])
def test_series(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
assert_eq(pd_data, gdf_data)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_pandas(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
assert_eq(pd_data_1, gdf_data_1.astype("datetime64[ns]"))
assert_eq(pd_data_2, gdf_data_2.astype("datetime64[ns]"))
assert_eq(pd_data_1 < pd_data_2, gdf_data_1 < gdf_data_2)
assert_eq(pd_data_1 > pd_data_2, gdf_data_1 > gdf_data_2)
assert_eq(pd_data_1 == pd_data_2, gdf_data_1 == gdf_data_2)
assert_eq(pd_data_1 <= pd_data_2, gdf_data_1 <= gdf_data_2)
assert_eq(pd_data_1 >= pd_data_2, gdf_data_1 >= gdf_data_2)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_numpy(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
np_data_1 = np.array(pd_data_1).astype(lhs_dtype)
np_data_2 = np.array(pd_data_2).astype(rhs_dtype)
np.testing.assert_equal(np_data_1, gdf_data_1.to_array())
np.testing.assert_equal(np_data_2, gdf_data_2.to_array())
np.testing.assert_equal(
np.less(np_data_1, np_data_2), (gdf_data_1 < gdf_data_2).to_array()
)
np.testing.assert_equal(
np.greater(np_data_1, np_data_2), (gdf_data_1 > gdf_data_2).to_array()
)
np.testing.assert_equal(
np.equal(np_data_1, np_data_2), (gdf_data_1 == gdf_data_2).to_array()
)
np.testing.assert_equal(
np.less_equal(np_data_1, np_data_2),
(gdf_data_1 <= gdf_data_2).to_array(),
)
np.testing.assert_equal(
np.greater_equal(np_data_1, np_data_2),
(gdf_data_1 >= gdf_data_2).to_array(),
)
@pytest.mark.parametrize("data", [data1(), data2()])
def test_dt_ops(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(data.copy())
assert_eq(pd_data == pd_data, gdf_data == gdf_data)
assert_eq(pd_data < pd_data, gdf_data < gdf_data)
assert_eq(pd_data > pd_data, gdf_data > gdf_data)
# libgdf doesn't respect timezones
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_series(data, field):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
base = getattr(pd_data.dt, field)
test = getattr(gdf_data.dt, field).to_pandas().astype("int64")
assert_series_equal(base, test)
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_index(data, field):
pd_data = data.copy()
gdf_data = DatetimeIndex(pd_data)
assert_index_equal(
getattr(gdf_data, field).to_pandas(), getattr(pd_data, field)
)
def test_setitem_datetime():
df = DataFrame()
df["date"] = pd.date_range("20010101", "20010105").values
assert np.issubdtype(df.date.dtype, np.datetime64)
def test_sort_datetime():
df = pd.DataFrame()
df["date"] = np.array(
[
np.datetime64("2016-11-20"),
np.datetime64("2020-11-20"),
np.datetime64("2019-11-20"),
np.datetime64("1918-11-20"),
np.datetime64("2118-11-20"),
]
)
df["vals"] = np.random.sample(len(df["date"]))
gdf = cudf.from_pandas(df)
s_df = df.sort_values(by="date")
s_gdf = gdf.sort_values(by="date")
assert_eq(s_df, s_gdf)
def test_issue_165():
df_pandas = pd.DataFrame()
start_date = dt.datetime.strptime("2000-10-21", "%Y-%m-%d")
data = [(start_date + dt.timedelta(days=x)) for x in range(6)]
df_pandas["dates"] = data
df_pandas["num"] = [1, 2, 3, 4, 5, 6]
df_cudf = DataFrame.from_pandas(df_pandas)
base = df_pandas.query("dates==@start_date")
test = df_cudf.query("dates==@start_date")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date
base_mask = df_pandas.dates == start_date
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_ts = pd.Timestamp(start_date)
test = df_cudf.query("dates==@start_date_ts")
base = df_pandas.query("dates==@start_date_ts")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_ts
base_mask = df_pandas.dates == start_date_ts
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_np = np.datetime64(start_date_ts, "ns")
test = df_cudf.query("dates==@start_date_np")
base = df_pandas.query("dates==@start_date_np")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_np
base_mask = df_pandas.dates == start_date_np
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
def test_typecast_from_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(dtype)
gdf_casted = gdf_data.astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_int64_to_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(np.int64).astype(dtype)
gdf_casted = gdf_data.astype(np.int64).astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [timeseries_us_data()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_different_datetime_resolutions(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data).astype(dtype)
gdf_series = Series(pd_data).astype(dtype)
np.testing.assert_equal(np_data, gdf_series.to_array())
@pytest.mark.parametrize(
"data", [timestamp_ms_data(), timestamp_us_data(), timestamp_ns_data()]
)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_string_timstamp_typecast_to_different_datetime_resolutions(
data, dtype
):
pd_sr = data
gdf_sr = cudf.Series.from_pandas(pd_sr)
expect = pd_sr.values.astype(dtype)
got = gdf_sr.astype(dtype).values_host
np.testing.assert_equal(expect, got)
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_data.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_from_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype).astype(from_dtype)
gdf_casted = gdf_data.astype(to_dtype).astype(from_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize(
"from_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_col = Series(np_data)._column
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_col.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("nulls", ["some", "all"])
def test_to_from_pandas_nulls(data, nulls):
pd_data = pd.Series(data.copy().astype("datetime64[ns]"))
if nulls == "some":
# Fill half the values with NaT
pd_data[list(range(0, len(pd_data), 2))] = np.datetime64("nat", "ns")
elif nulls == "all":
# Fill all the values with NaT
pd_data[:] = np.datetime64("nat", "ns")
gdf_data = Series.from_pandas(pd_data)
expect = pd_data
got = gdf_data.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_to_arrow(dtype):
timestamp = (
cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={}
)
.reset_index()["timestamp"]
.reset_index(drop=True)
)
gdf = DataFrame({"timestamp": timestamp.astype(dtype)})
assert_eq(gdf, DataFrame.from_arrow(gdf.to_arrow(preserve_index=False)))
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize(
"nulls", ["none", pytest.param("some", marks=pytest.mark.xfail)]
)
def test_datetime_unique(data, nulls):
psr = pd.Series(data)
print(data)
print(nulls)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.unique()
got = gsr.unique()
assert_eq(pd.Series(expected), got.to_pandas())
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
| pd.Series([None, None], dtype="datetime64[ns]") | pandas.Series |
import numpy as np
import pytest
import pandas as pd
from pandas.core.sorting import nargsort
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseMethodsTests(BaseExtensionTests):
"""Various Series and DataFrame methods."""
@pytest.mark.parametrize('dropna', [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected = pd.Series(other).value_counts(
dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
def test_count(self, data_missing):
df = pd.DataFrame({"A": data_missing})
result = df.count(axis='columns')
expected = pd.Series([0, 1])
self.assert_series_equal(result, expected)
def test_series_count(self, data_missing):
# GH#26835
ser = pd.Series(data_missing)
result = ser.count()
expected = 1
assert result == expected
def test_apply_simple_series(self, data):
result = pd.Series(data).apply(id)
assert isinstance(result, pd.Series)
def test_argsort(self, data_for_sorting):
result = pd.Series(data_for_sorting).argsort()
expected = pd.Series(np.array([2, 0, 1], dtype=np.int64))
self.assert_series_equal(result, expected)
def test_argsort_missing(self, data_missing_for_sorting):
result = pd.Series(data_missing_for_sorting).argsort()
expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('na_position, expected', [
('last', np.array([2, 0, 1], dtype=np.dtype('intp'))),
('first', np.array([1, 2, 0], dtype=np.dtype('intp')))
])
def test_nargsort(self, data_missing_for_sorting, na_position, expected):
# GH 25439
result = nargsort(data_missing_for_sorting, na_position=na_position)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values(self, data_for_sorting, ascending):
ser = pd.Series(data_for_sorting)
result = ser.sort_values(ascending=ascending)
expected = ser.iloc[[2, 0, 1]]
if not ascending:
expected = expected[::-1]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values_missing(self, data_missing_for_sorting, ascending):
ser = pd.Series(data_missing_for_sorting)
result = ser.sort_values(ascending=ascending)
if ascending:
expected = ser.iloc[[2, 0, 1]]
else:
expected = ser.iloc[[0, 2, 1]]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values_frame(self, data_for_sorting, ascending):
df = pd.DataFrame({"A": [1, 2, 1],
"B": data_for_sorting})
result = df.sort_values(['A', 'B'])
expected = pd.DataFrame({"A": [1, 1, 2],
'B': data_for_sorting.take([2, 0, 1])},
index=[2, 0, 1])
self.assert_frame_equal(result, expected)
@pytest.mark.parametrize('box', [pd.Series, lambda x: x])
@pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
duplicated = box(data._from_sequence([data[0], data[0]]))
result = method(duplicated)
assert len(result) == 1
assert isinstance(result, type(data))
assert result[0] == duplicated[0]
@pytest.mark.parametrize('na_sentinel', [-1, -2])
def test_factorize(self, data_for_grouping, na_sentinel):
labels, uniques = pd.factorize(data_for_grouping,
na_sentinel=na_sentinel)
expected_labels = np.array([0, 0, na_sentinel,
na_sentinel, 1, 1, 0, 2],
dtype=np.intp)
expected_uniques = data_for_grouping.take([0, 4, 7])
tm.assert_numpy_array_equal(labels, expected_labels)
self.assert_extension_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize('na_sentinel', [-1, -2])
def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
l1, u1 = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
l2, u2 = data_for_grouping.factorize(na_sentinel=na_sentinel)
tm.assert_numpy_array_equal(l1, l2)
self.assert_extension_array_equal(u1, u2)
def test_factorize_empty(self, data):
labels, uniques = pd.factorize(data[:0])
expected_labels = np.array([], dtype=np.intp)
expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)
tm.assert_numpy_array_equal(labels, expected_labels)
self.assert_extension_array_equal(uniques, expected_uniques)
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
df = pd.DataFrame({"A": arr})
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
assert df.A.values is not result.A.values
def test_fillna_copy_series(self, data_missing):
arr = data_missing.take([1, 1])
ser = pd.Series(arr)
filled_val = ser[0]
result = ser.fillna(filled_val)
assert ser._values is not result._values
assert ser._values is arr
def test_fillna_length_mismatch(self, data_missing):
msg = "Length of 'value' does not match."
with pytest.raises(ValueError, match=msg):
data_missing.fillna(data_missing.take([1]))
def test_combine_le(self, data_repeated):
# GH 20825
# Test that combine works when doing a <= (le) comparison
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = | pd.Series(orig_data2) | pandas.Series |
"""
Getting most discussed stocks from r/wallstreetbets hot
"""
import json
import os
import re
import time
from collections import ChainMap, Counter
from datetime import datetime
import pandas as pd
import requests
from dotenv import load_dotenv
load_dotenv()
CLIENT_ID = os.getenv("CLIENT_ID")
SECRET = os.getenv("SECRET")
data = {
"grant_type": "password",
"username": os.getenv("username"),
"password": os.getenv("password"),
}
headers = {"User-Agent": "wsb_API/0.0.1"}
auth = requests.auth.HTTPBasicAuth(CLIENT_ID, SECRET)
def get_data(auth, data, headers):
"""
Create access to Reddit API and load data from recent threads in r/wsb/hot
auth:
HTTP Authentication with Client_ID and Secret
data:
Access to Reddit via username and password
headers:
Extra information about the request
return:
GET response
"""
res = requests.post(
"https://www.reddit.com/api/v1/access_token",
auth=auth,
data=data,
headers=headers,
)
TOKEN = res.json()["access_token"]
headers["Authorization"] = f"bearer {TOKEN}"
response = requests.get(
"https://oauth.reddit.com/r/wallstreetbets/hot", headers=headers
)
return response
def extract_sym(series): # Should work also with 2 Character stock symbols
"""
    Extracts all words with 3 or more capital letters
params :series:
pd.Series
return:
list of stock symbols
"""
symbols = []
for i in series:
symbols.append(re.findall("[A-Z]{3,}", i))
return symbols
def get_stocks_with_count(series):
"""
Count all mentioned stocks
params: Series
        pd.Series of lists, where each element holds the symbols extracted from one post
    return:
        list of dicts, one per post, mapping each stock symbol to its count
    """
    counter_list = []
    dict_counter = []
    for symbols in series:
        counter_list.append(Counter(symbols).items())
    for counted in counter_list:
        dict_counter.append(dict(counted))
return dict_counter
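# Hedged usage sketch (illustration only; the post titles are made up): extract_sym pulls
# capitalised tickers out of each title and get_stocks_with_count counts them per post.
def _symbol_count_example():
    titles = pd.Series(["GME GME to the moon", "Buying AMC and GME"])
    per_post_symbols = pd.Series(extract_sym(titles))
    return get_stocks_with_count(per_post_symbols)  # e.g. first post -> {'GME': 2}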
# this json is from stocks_with_symbol.py
with open("data/stocks_symbol.json") as json_file:
stocks_dict = json.load(json_file)
def data_wrangling(dict_stock):
"""
Prep for final df
params: dict_stock
dictionary with symbols and count
return:
final df
"""
df_stocks = dict(ChainMap(*dict_stock))
df_stocks = pd.Series(df_stocks).to_frame()
df_stocks["Stocks_Name"] = df_stocks.index.map(stocks_dict)
df_stocks.dropna(inplace=True)
df_stocks = df_stocks.rename(columns={0: "Count"})
df_stocks.sort_values(by=["Count"], ascending=False, inplace=True)
print(df_stocks)
return df_stocks
if __name__ == "__main__":
while True:
print(datetime.now())
wsb_hot = get_data(auth, data, headers)
df = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
"""
Extracts path data for a user or a set of users and analyses with pathpy.
"""
import csv
import json
import os
import numpy as np
import matplotlib.pyplot as plt
import igraph
import pathpy as pp
from scipy.stats import chi2
from collections import Counter
from pandas import DataFrame
import seaborn as sns
from scipy.stats import pearsonr
sns.set(style='white', font_scale=1.2)
PATH = os.path.expanduser("~/git/network_games_analysis/sql_data/")
SAVE_PATH = os.path.expanduser("~/git/network_games_analysis/scaffold/")
FILENAME = 'scaffold_data_mysql.csv'
users = ["darigan17", "Fandy", "heptone", "khana", "badhanddoek", "sittaford", "Krab", "tamas", "skillz25", "meezocool", "ThatOneGuy", "BirdEyeView", "Mursuka"]
users_short = ["darigan17", "Fandy", "heptone"]
def listrun(pattern, values):
"""Find runs in a pattern containing elements from given values"""
runs = list()
run = list()
for i in pattern:
if i in values:
run.append(i)
else:
if len(run) > 1:
runs.append(run)
run = list()
elif len(run) == 1:
run = list()
if len(run) > 1:
runs.append(run)
return runs
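# Hedged usage sketch (illustration only, toy values): listrun keeps only the maximal
# consecutive runs (length > 1) of elements drawn from `values`.
def _listrun_example():
    return listrun(['A', 'B', 'x', 'A', 'y', 'B', 'B'], {'A', 'B'})  # -> [['A', 'B'], ['B', 'B']]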
def get_users():
users = list()
##PATH COLLECTION
filename = PATH + FILENAME
with open(filename, 'r', encoding='utf-8') as csvfile:
csv_reader = csv.reader(csvfile, delimiter='\t')
print(f"Parsed file: {FILENAME}")
line_count = 0
user_count = 0
for row in csv_reader:
# Ignoring header row
if line_count == 0:
print(f'Columns: {", ".join(row)}')
line_count += 1
# Ignoring data from other users
else:
line_count += 1
user = row[2]
if user not in users:
users.append(user)
return users
def estimate_user_kopt(user, top_nodes):
USER = user
##PATH COLLECTION
paths = list()
path = list()
filename = PATH + FILENAME
with open(filename, 'r', encoding='utf-8') as csvfile:
csv_reader = csv.reader(csvfile, delimiter='\t')
print(f"Parsed file: {FILENAME}")
line_count = 0
user_count = 0
user_last_clicks = {}
for row in csv_reader:
# Ignoring header row
if line_count == 0:
print(f'Columns: {", ".join(row)}')
line_count += 1
# Ignoring data from other users
elif USER == "all":
line_count += 1
user = row[2]
article = row[3]
game = row[4]
if user_last_clicks.get('game', "") == game:
if user_last_clicks['article'] != article:
path.append(article)
else:
if len(path) != 0:
paths.append(path)
path = list()
path.append(article)
user_last_clicks = {"article": article, "game": game}
elif row[2] == USER:
line_count += 1
user = row[2]
article = row[3]
game = row[4]
if user_last_clicks.get('game', "") == game:
if user_last_clicks['article'] != article:
path.append(article)
else:
if len(path) != 0:
paths.append(path)
path = list()
path.append(article)
user_last_clicks = {"article": article, "game": game}
else:
continue
##PATH FILTERING
top_node_number=top_nodes
flat_list=Counter([item for path in paths for item in path])
#print(flat_list)
sorted_nodes=[ x[0] for x in sorted( flat_list.items() , key=lambda x: x[1], reverse=True)]
top_sorted_nodes=sorted_nodes[0:top_node_number]
#print(top_sorted_nodes, end="\n\n")
paths_reduced = list()
for path in paths:
runs = listrun(path, top_sorted_nodes)
for run in runs:
paths_reduced.append(run)
#print(paths_reduced)
## Add paths to pathpy
p = pp.Paths()
for path in paths_reduced:
p.add_path(path)
print(p)
mog = pp.MultiOrderModel(p, max_order=2)
#print('Optimal order = ', mog.estimate_order())
return (len(paths_reduced), mog.estimate_order())
user_list = get_users()
kopts = list()
pathnums = list()
for user in user_list:
pathnum,kopt = estimate_user_kopt(user, 25)
kopts.append(kopt)
pathnums.append(pathnum)
print('Optimal orders = ', kopts)
print(len(kopts))
# plt.plot(kopts)
# plt.plot(pathnums)
# plt.show()
eval_list = (pathnums,kopts)
df = | DataFrame(eval_list) | pandas.DataFrame |
import matplotlib
matplotlib.use('Agg')
import re
import argparse
from datetime import datetime, timedelta, time
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import itertools
from sklearn.metrics import confusion_matrix
ANNO_LABEL_DICT = 'annotation-label-dictionary.csv'
DOHERTY2018_DICT_COL = 'label:Doherty2018'
DOHERTY2018_COLOURS = {'sleep':'blue',
'sedentary': 'red',
'tasks-light': 'darkorange',
'walking': 'lightgreen',
'moderate': 'green'}
DOHERTY2018_LABELS = list(DOHERTY2018_COLOURS.keys())
WILLETTS2018_DICT_COL = 'label:Willetts2018'
WILLETTS2018_COLOURS = {'sleep':'blue',
'sit-stand': 'red',
'vehicle': 'darkorange',
'walking': 'lightgreen',
'mixed': 'green',
'bicycling': 'purple'}
WILLETTS2018_LABELS = list(WILLETTS2018_COLOURS.keys())
WALMSLEY2020_DICT_COL = 'label:Walmsley2020'
WALMSLEY2020_COLOURS = {'sleep':'blue',
'sedentary': 'red',
'light': 'darkorange',
'moderate-vigorous': 'green'}
WALMSLEY2020_LABELS = list(WALMSLEY2020_COLOURS.keys())
IMPUTED_COLOR = '#fafc6f' # yellow
UNCODEABLE_COLOR = '#d3d3d3' # lightgray
BACKGROUND_COLOR = '#d3d3d3' # lightgray
def annotationSimilarity(anno1, anno2):
''' Naive sentence similarity '''
DELIMITERS = ";|, | "
words1 = re.split(DELIMITERS, anno1)
words2 = re.split(DELIMITERS, anno2)
shared_words = set(set(words1) & set(words2))
similarity = len(shared_words) / len(words1) # why words1 and not words2? how about averaging?
return similarity
def nearestAnnotation(annoList, annoTarget, threshold=.8):
similarities = [annotationSimilarity(annoTarget, _) for _ in annoList]
if np.max(similarities) < threshold:
print(f"No similar annotation found in dictionary for: '{annoTarget}'")
return None
return annoList[np.argmax(similarities)]
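# Hedged usage sketch (illustration only; the annotation strings are made up):
# annotationSimilarity scores the fraction of the target's words found in a candidate,
# and nearestAnnotation returns the best-scoring dictionary entry above the threshold.
def _nearest_annotation_example():
    candidates = ['walking;moderate', 'sitting;sedentary']
    return nearestAnnotation(candidates, 'walking, moderate pace', threshold=0.5)  # -> 'walking;moderate'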
def buildLabelDict(labelDictCSV, labelDictCol):
df = | pd.read_csv(labelDictCSV, usecols=['annotation', labelDictCol]) | pandas.read_csv |
from mpl_toolkits import mplot3d
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from plotnine import *
import copy, math
dist = 10
def find_min_discm_each_hyperparam(df):
x = df.sort_values(by=['Discm_percent', 'Points-Removed']).groupby("Model-count", as_index=False).first()
assert len(x) == 240
return x
def process_rows(row, batches):
# global batches
model_count = 0
for perm in range(20):
for h1units in [16, 24, 32]:
for h2units in [8, 12]:
for batch in batches: # different batch sizes for this dataset
if perm == row['Dataperm'] and h1units == row['H1Units'] and h2units == row['H2Units'] and batch == row['Batch']:
return model_count
else:
model_count += 1
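# Note (added for clarity): the nested loops above enumerate 20 data permutations x 3 H1 sizes
# x 2 H2 sizes x 2 batch sizes = 240 hyperparameter settings, so each row maps to a unique
# model index in [0, 239], matching the asserts on 'Model-count' elsewhere in this file.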
def process_dfs(name, batches, df):
# import ipdb; ipdb.set_trace()
if 'Model-count' in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
# df = df[['Model-count','Discm_percent','Test_acc']]
df = df[['Model-count','Discm_percent','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos']) * 100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x * 100)
df['Techniques'] = name
if len(name.split()) > 1:
words = name.split()
letters = [word[0] for word in words]
x = "".join(letters)
df['Baseline'] = x
else:
df['Baseline'] = name[:2]
return df
def boxplots_datasets(dataset, plot):
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos'])*100
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x*100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff']*100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
df_nosensitive['Discm_percent'] = 0.0
df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
# df_nosensitive = process_dfs("Sensitive Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv"))
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our])
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", '0.0' ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, 'min-discm.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, 'max-test-accuracy.tex', dataset)
if test_accuracy_for_min_discm:
# for sensitive there is always 0 discrimination.
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, 'test_accuracy_for_min_discm.tex', dataset)
if median_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
"0.0",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, 'discm_for_max_accuracy.tex', dataset)
return df_main
def boxplots_datasets_dist(dataset, plot):
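    ''' Same as boxplots_datasets, but reads the result files computed with the alternative
    distance metric (filename suffix _dist{dist}). '''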
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count', 'Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos']) * 100 # Statistical parity diff
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x * 100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
df_adver['Model-count'] = df_adver['Dataperm'] * 12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff'] * 100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
# df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = process_dfs("Sens. Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv"))
# df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
# df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
# df_nosensitive['Discm_percent'] = 0.0
# df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
# df_nosensitive['Techniques'] = "Sens. Removed"
# df_nosensitive['Baseline'] = "SR"
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_dist{dist}.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our], sort=True)
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", f"{df_nosensitive['Discm_percent'].min():.{precision}e}" ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, f'min-discm_dist{dist}.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, f'max-test-accuracy_dist{dist}.tex', dataset)
if test_accuracy_for_min_discm:
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Discm_percent'] == df_nosensitive['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, f'test_accuracy_for_min_discm_dist{dist}.tex', dataset)
if median_discm:
raise NotImplementedError
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
raise NotImplementedError
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
raise NotImplementedError
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, f'discm_for_max_accuracy_dist{dist}.tex', dataset)
return df_main
def print_to_tex(string, file, dataset, mode=None):
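    ''' Append one LaTeX table row to tables/<file>; the first dataset (adult) opens the file
    in write mode and the row for the last dataset (salary) additionally appends a midrule. '''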
    if mode is None:
if dataset == "adult":
mode = "w"
else:
mode = "a"
# with open(f"../../neurips_fairness_paper/tables/{file}", mode) as f:
with open(f"tables/{file}", mode) as f:
if dataset == "salary":
            string += " \\\\ \\midrule"
else:
string += " \\\\ "
print(string, file=f)
# print(dataset)
# print("Min discm: ", df_DIR['Discm_percent'].min())
# print("Min discm: ", df_ps['Discm_percent'].min())
# print("Min discm: ", df_massaging['Discm_percent'].min())
# print("Min discm: ", df_lfr['Discm_percent'].min())
# print("Min discm: ", df_adver['Discm_percent'].min())
# print("Min discm: ", df_our['Discm_percent'].min())
def main(plot):
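    ''' Collect results over all eight benchmarks; with plot == 0 only the LaTeX tables are
    written, otherwise the free-axis boxplots of remaining discrimination and test accuracy
    are drawn and saved under boxplots/. '''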
df_main = None
benchmarks = ["adult", "adult_race", "german", "student", "compas-ground", "compas-score", "default", "salary"]
for dataset in benchmarks:
# df_onedataset = boxplots_datasets(dataset, plot)
df_onedataset = boxplots_datasets_dist(dataset, plot)
        if df_main is not None:
df_main = pd.concat([df_main, df_onedataset])
else:
df_main = copy.deepcopy(df_onedataset)
print(f"Done {dataset}")
if plot == 0:
return
labels = ['FU', 'SR', 'DIR', 'PS', 'MA', 'LFR', 'AD', 'Our']
tech_cat = pd.Categorical(df_main['Baseline'], categories=labels)
df_main = df_main.assign(Technique_x = tech_cat)
dataset_order = ["D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8"]
data_cat = pd.Categorical(df_main['Dataset'], categories=dataset_order)
df_main = df_main.assign(Dataset_x = data_cat)
# x = (ggplot(aes(x='Technique_x', y='Discm_percent', color='Techniques'), data=df_main) +\
# geom_boxplot() +\
# facet_wrap(['Dataset'], scales = 'free', nrow=2, labeller='label_both', shrink=False) + \
# ylab("Remaining Individual Discrimination") + \
# xlab("Discrimination reducing techniques") + \
# # ylim(0, 20) + \
# # ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
# theme(axis_text_x = element_text(size=6), dpi=151) + \
# theme_seaborn()
# )
# This is responsible for the legend - remove color='Techniques'
x = (ggplot(aes(x='Technique_x', y='Discm_percent'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Remaining Individual Discrimination") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 20) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
x = x.draw()
x.set_figwidth(20)
x.set_figheight(12)
for ax in range(len(benchmarks)):
low_limit = -0.05
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Discm_percent'].max()
        if top_limit > 20:
top_limit = 20
if top_limit > 13: # These hacks are for aligning the 0 at the bottom of the plots.
low_limit = -0.3
x.axes[ax].set_ylim(low_limit, top_limit)
# x.tight_layout() # This didn't work
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
# x.save(f"boxplot_discm_freeaxis_matplotlib.png", height=8, width=18)
# x.save(f"boxplot_discm_freeaxis_withoutfull.png", height=12, width=15)
# x.save(f"boxplot_discm_fixedaxis.png", height=5, width=12)
y = (ggplot(aes(x='Technique_x', y='Test_acc'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Test Accuracy") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 100) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
# y.save(f"boxplot_accuracy_freeaxis.png", height=8, width=18)
y = y.draw()
y.set_figwidth(20)
y.set_figheight(12)
for ax in range(len(benchmarks)):
bot_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].min()
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].max()
y.axes[ax].set_ylim(bot_limit - 1, top_limit + 2)
# y.tight_layout()
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
def real_accuracy_tables(debiased):
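    ''' For the compas-score dataset, merge each technique's models with the separately computed
    real-accuracy results (*_real_accuracy_{debiased|full}.csv, selected by the flag) and print
    LaTeX rows for the accuracy at minimum discrimination and the maximum accuracy. '''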
dataset = "compas-score"
if debiased:
deb = "debiased"
else:
deb = "full"
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique()))
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent']]
df_our_2 = pd.read_csv(f"{dataset}/results_our_real_accuracy_{deb}.csv")
df_our_final = pd.merge(df_our, df_our_2, on=['Model-count'])
df_our_final['Test_acc'] = df_our_final['Test_acc'].apply(lambda x: x*100)
df_our_final['Techniques'] = "Our Technique"
df_our_final['Baseline'] = "Our"
# import ipdb; ipdb.set_trace()
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
df_massaging.drop(columns=['Test_acc'], inplace=True)
df_massaging_2 = pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_real_accuracy_{deb}.csv")
df_massaging_final = pd.merge(df_massaging, df_massaging_2, on=['Model-count'])
df_massaging_final['Test_acc'] = df_massaging_final['Test_acc'].apply(lambda x: x*100)
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
df_ps.drop(columns=['Test_acc'], inplace=True)
df_ps_2 = pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_real_accuracy_{deb}.csv")
df_ps_final = pd.merge(df_ps, df_ps_2, on=['Model-count'])
df_ps_final['Test_acc'] = df_ps_final['Test_acc'].apply(lambda x: x*100)
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
df_lfr.drop(columns=['Test_acc'], inplace=True)
df_lfr_2 = pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_real_accuracy_{deb}.csv")
df_lfr_final = pd.merge(df_lfr, df_lfr_2, on=['Model-count'])
df_lfr_final['Test_acc'] = df_lfr_final['Test_acc'].apply(lambda x: x*100)
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
df_DIR.drop(columns=['Test_acc'], inplace=True)
df_DIR_2 = pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_real_accuracy_{deb}.csv")
df_DIR_final = pd.merge(df_DIR, df_DIR_2, on=['Model-count'])
df_DIR_final['Test_acc'] = df_DIR_final['Test_acc'].apply(lambda x: x*100)
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent']]
df_adver_2 = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_real_accuracy_{deb}.csv")
df_adver_2['Model-count'] = df_adver_2['Dataperm']*12
df_adver_final = pd.merge(df_adver, df_adver_2, on=['Model-count'])
df_adver_final['Test_acc'] = df_adver_final['Test_acc'].apply(lambda x: x*100)
df_adver_final['Techniques'] = "Adversa. debias"
df_adver_final['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
df_nosensitive = pd.read_csv(f"{dataset}/results_nosensitive_real_accuracy_{deb}.csv")
df_nosensitive = df_nosensitive[['Model-count','Test_acc']]
df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
# df_nosensitive = process_dfs("Sensitive Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv"))
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
df_noremoval.drop(columns=['Test_acc'], inplace=True)
df_noremoval_2 = pd.read_csv(f"{dataset}/results_noremoval_real_accuracy_{deb}.csv")
df_noremoval_final = pd.merge(df_noremoval, df_noremoval_2, on=['Model-count'])
df_noremoval_final['Test_acc'] = df_noremoval_final['Test_acc'].apply(lambda x: x*100)
max_accuracy = True
corresponding_max_accuracy = True
mean_accuracy = False
median_accuracy = False
id_ = "D5"
precision = 1
if corresponding_max_accuracy:
# for sensitive there is always 0 discrimination.
z = ' & '.join([f"{id_}", f"{df_noremoval_final.loc[df_noremoval_final['Discm_percent'] == df_noremoval_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive['Test_acc'].max():.{precision}e}",
f"{df_DIR_final.loc[df_DIR_final['Discm_percent'] == df_DIR_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps_final.loc[df_ps_final['Discm_percent'] == df_ps_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging_final.loc[df_massaging_final['Discm_percent'] == df_massaging_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr_final.loc[df_lfr_final['Discm_percent'] == df_lfr_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver_final.loc[df_adver_final['Discm_percent'] == df_adver_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our_final.loc[df_our_final['Discm_percent'] == df_our_final['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
a = ' & '.join([f"{id_}", f"{df_noremoval_final['Discm_percent'].min():.{precision}e}",
"0.0",
f"{df_DIR_final['Discm_percent'].min():.{precision}e}",
f"{df_ps_final['Discm_percent'].min():.{precision}e}",
f"{df_massaging_final['Discm_percent'].min():.{precision}e}",
f"{df_lfr_final['Discm_percent'].min():.{precision}e}",
f"{df_adver_final['Discm_percent'].min():.{precision}e}",
f"{df_our_final['Discm_percent'].min():.{precision}e}"])
print_to_tex(z, f'correspond-real-accuracy_{deb}.tex', dataset, "w")
print_to_tex(a, f'correspond-real-accuracy_{deb}.tex', dataset, "a")
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval_final['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR_final['Test_acc'].max():.{precision}e}", f"{df_ps_final['Test_acc'].max():.{precision}e}", f"{df_massaging_final['Test_acc'].max():.{precision}e}", f"{df_lfr_final['Test_acc'].max():.{precision}e}", f"{df_adver_final['Test_acc'].max():.{precision}e}", f"{df_our_final['Test_acc'].max():.{precision}e}"])
print_to_tex(y, f'max-real-accuracy_{deb}.tex', dataset, "w")
print("Done real accuracy")
def fpr_fnr_process_dfs(name, batches, df):
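    ''' Like process_dfs, but additionally computes the between-group false-positive and
    false-negative rate differences (in percent) and ratios. '''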
    if 'Model-count' in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df = df[['Model-count','Discm_percent', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
df['FPR_diff'] = abs(df['Class0_FPR'] - df['Class1_FPR'])*100
# df_our['FPR_sum'] = df_our['Class0_FPR'] + df_our['Class1_FPR']
df['FPR_ratio'] = df['Class0_FPR'] / df['Class1_FPR']
df['FNR_diff'] = abs(df['Class0_FNR'] - df['Class1_FNR'])*100
# df_our['FNR_sum'] = df_our['Class0_FNR'] + df_our['Class1_FNR']
df['FNR_ratio'] = df['Class0_FNR'] / df['Class1_FNR']
# df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos'])*100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x*100)
df['Techniques'] = name
return df
def fpr_fnr_rates():
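    ''' Write LaTeX rows for the group FPR/FNR gaps of every technique on each benchmark:
    the minimum gap, the gap at minimum discrimination, and the gap at maximum test accuracy,
    on either the full or the debiased test set depending on `kind`. '''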
def fpr_fnr_print(dataset, id_, kind):
if kind:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
else:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_fulltest.csv")
batches = sorted(list(df1.Batch.unique()))
assert(len(batches) == 2)
# import ipdb; ipdb.set_trace()
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
df_our['FPR_diff'] = abs(df_our['Class0_FPR'] - df_our['Class1_FPR'])*100
# df_our['FPR_sum'] = df_our['Class0_FPR'] + df_our['Class1_FPR']
df_our['FPR_ratio'] = df_our['Class0_FPR'] / df_our['Class1_FPR']
df_our['FNR_diff'] = abs(df_our['Class0_FNR'] - df_our['Class1_FNR'])*100
# df_our['FNR_sum'] = df_our['Class0_FNR'] + df_our['Class1_FNR']
df_our['FNR_ratio'] = df_our['Class0_FNR'] / df_our['Class1_FNR']
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
if kind:
df_massaging = fpr_fnr_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_fulltest.csv"))
else:
df_massaging = fpr_fnr_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
if kind:
df_ps = fpr_fnr_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_fulltest.csv"))
else:
df_ps = fpr_fnr_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
if kind:
df_lfr = fpr_fnr_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_fulltest.csv"))
else:
df_lfr = fpr_fnr_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
if kind:
df_DIR = fpr_fnr_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_fulltest.csv"))
else:
df_DIR = fpr_fnr_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
if kind:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_fulltest.csv")
else:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
# df_adver = df_adver[['Model-count','Discm_percent', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
# df_adver['FPR_diff'] = abs(df_adver['Class0_FPR'] - df_adver['Class1_FPR'])*100
# # df_our['FPR_sum'] = df_our['Class0_FPR'] + df_our['Class1_FPR']
# df_adver['FPR_ratio'] = df_adver['Class0_FPR'] / df_adver['Class1_FPR']
# df_adver['FNR_diff'] = abs(df_adver['Class0_FNR'] - df_adver['Class1_FNR'])*100
# # df_our['FNR_sum'] = df_our['Class0_FNR'] + df_our['Class1_FNR']
# df_adver['FNR_ratio'] = df_adver['Class0_FNR'] / df_adver['Class1_FNR']
        df_adver['FPR_diff'] = df_adver['FPR_ratio'] = df_adver['FNR_diff'] = df_adver['FNR_ratio'] = 1000.0  # sentinel values; the per-group FPR/FNR computation above is disabled for the adversarial results
# df_adver['diff'] = df_adver['diff']*100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = df_nosensitive[['Model-count', 'Test_acc', 'Class0_FPR', 'Class0_FNR', 'Class1_FPR', 'Class1_FNR']]
df_nosensitive['FPR_diff'] = abs(df_nosensitive['Class0_FPR'] - df_nosensitive['Class1_FPR'])*100
# df_nosensitive['FPR_sum'] = df_nosensitive['Class0_FPR'] + df_nosensitive['Class1_FPR']
df_nosensitive['FPR_ratio'] = df_nosensitive['Class0_FPR'] / df_nosensitive['Class1_FPR']
df_nosensitive['FNR_diff'] = abs(df_nosensitive['Class0_FNR'] - df_nosensitive['Class1_FNR'])*100
# df_nosensitive['FNR_sum'] = df_nosensitive['Class0_FNR'] + df_nosensitive['Class1_FNR']
df_nosensitive['FNR_ratio'] = df_nosensitive['Class0_FNR'] / df_nosensitive['Class1_FNR']
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
if kind:
df_noremoval = fpr_fnr_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_fulltest.csv"))
else:
df_noremoval = fpr_fnr_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
min_rate_difference = True
rate_difference_for_min_discm = True
rate_difference_for_max_accuracy = True
precision = 1
if min_rate_difference:
a = ' & '.join([f"{id_}",
str(float(f"{df_noremoval['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_ps['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_adver['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_our['FPR_diff'].min():.{precision}e}"))])
b = ' & '.join([f"{id_}",
str(float(f"{df_noremoval['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_ps['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_adver['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_our['FNR_diff'].min():.{precision}e}"))])
# b = ' & '.join([f"{id_}", f"{df_nosensitive['FNR_diff'].min():.{precision}e}", "\\textbf{%s}"%(f"{df_our['FNR_diff'].min():.{precision}e}")])
# c = ' & '.join([f"{id_}", f"{df_nosensitive['FPR_ratio'].min():.{precision}e}", "\\textbf{%s}"%(f"{df_our['FPR_ratio'].min():.{precision}e}")])
# d = ' & '.join([f"{id_}", f"{df_nosensitive['FNR_ratio'].min():.{precision}e}", "\\textbf{%s}"%(f"{df_our['FNR_ratio'].min():.{precision}e}")])
# e = ' & '.join([f"{id_}", f"{df_nosensitive['Class0_FPR'].min():.{precision}e}", f"{df_nosensitive['Class1_FPR'].min():.{precision}e}", f"{df_our['Class0_FNR'].min():.{precision}e}", f"{df_our['Class1_FNR'].min():.{precision}e}"])
if kind:
print_to_tex(a, 'min-fpr_rate_fulltest.tex', dataset)
print_to_tex(b, 'min-fnr_rate_fulltest.tex', dataset)
else:
print_to_tex(a, 'min-fpr_rate_debiasedtest.tex', dataset)
print_to_tex(b, 'min-fnr_rate_debiasedtest.tex', dataset)
if rate_difference_for_min_discm:
x = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_diff'].min():.{precision}e}"))])
y = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_diff'].min():.{precision}e}"))])
# l = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_sum'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_sum'].min():.{precision}e}"])
# m = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_sum'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_sum'].min():.{precision}e}"])
# q = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_ratio'].min():.{precision}e}"])
# r = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_ratio'].min():.{precision}e}"])
# z = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FPR_ratio'].min():.{precision}e}"])
# z1 = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_ratio'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['FNR_ratio'].min():.{precision}e}"])
# z2 = ' & '.join([f"{id_}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Class0_FPR'].min():.{precision}e}",
# f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Class1_FPR'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Class0_FNR'].min():.{precision}e}",
# f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Class1_FNR'].min():.{precision}e}"])
if kind:
print_to_tex(x, 'fpr_rate-min-discm_fulltest.tex', dataset)
print_to_tex(y, 'fnr_rate-min-discm_fulltest.tex', dataset)
else:
print_to_tex(x, 'fpr_rate-min-discm_debiasedtest.tex', dataset)
print_to_tex(y, 'fnr_rate-min-discm_debiasedtest.tex', dataset)
if rate_difference_for_max_accuracy:
l = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['FPR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['FPR_diff'].min():.{precision}e}"))])
m = ' & '.join([f"{id_}",
str(float(f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['FNR_diff'].min():.{precision}e}")),
str(float(f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['FNR_diff'].min():.{precision}e}"))])
if kind:
print_to_tex(l, 'fpr_rate-max-accuracy_fulltest.tex', dataset)
print_to_tex(m, 'fnr_rate-max-accuracy_fulltest.tex', dataset)
else:
print_to_tex(l, 'fpr_rate-max-accuracy_debiasedtest.tex', dataset)
print_to_tex(m, 'fnr_rate-max-accuracy_debiasedtest.tex', dataset)
# df_main = None
benchmarks = ["adult", "adult_race", "german", "student", "compas-ground", "compas-score", "default", "salary"]
# benchmarks = ["adult", "german", "student", "compas-ground", "compas-score", "default"]
kind = "debiased"
# kind = "full"
for dataset in benchmarks:
if dataset == "adult":
id_ = "D1"
elif dataset == "adult_race":
id_ = "D2"
elif dataset == "german":
id_ = "D3"
elif dataset == "student":
id_ = "D4"
elif dataset == "compas-ground":
id_ = "D5"
elif dataset == "compas-score":
id_ = "D6"
elif dataset == "default":
id_ = "D7"
elif dataset == "salary":
id_ = "D8"
else:
raise NotImplementedError
if kind == "full":
fpr_fnr_print(dataset, id_, kind=True)
elif kind == "debiased":
fpr_fnr_print(dataset, id_, kind=False)
print(f"Done {dataset}")
def parity_process_dfs(name, batches, df):
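    ''' process_dfs variant used for the statistical-parity tables: keeps the per-group positive
    rates and their gap ('diff') alongside discrimination and accuracy. '''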
    if 'Model-count' in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df = df[['Model-count','Discm_percent','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos'])*100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x*100)
df['Techniques'] = name
if len(name.split()) > 1:
words = name.split()
letters = [word[0] for word in words]
x = "".join(letters)
df['Baseline'] = x
else:
df['Baseline'] = name[:2]
return df
def statistical_parity(dist_metric):
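    ''' Build the statistical (demographic) parity comparison across techniques; dist_metric
    switches to the _dist{dist} result files, and parity_print's `kind` flag selects between
    the full and the debiased test set. '''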
def parity_print(dataset, id_, kind, plot=False):
if kind:
if dist_metric:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_fulltest_dist{dist}.csv")
df2 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
else:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_fulltest.csv")
df2 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
else:
if dist_metric:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
else:
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique()))
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos']) * 100
if kind:
df_our2 = find_min_discm_each_hyperparam(df2) # since the sorting is on the basis of discm, it remains same
df_our2['Test_acc'] = df_our2['Test_acc'].apply(lambda x: x * 100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# import ipdb; ipdb.set_trace()
if kind:
if dist_metric:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_fulltest_dist{dist}.csv"))
df_massaging2 = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
else:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_fulltest.csv"))
df_massaging2 = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
else:
if dist_metric:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
else:
df_massaging = parity_process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
if kind:
if dist_metric:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_fulltest_dist{dist}.csv"))
df_ps2 = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
else:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_fulltest.csv"))
df_ps2 = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
else:
if dist_metric:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
else:
df_ps = parity_process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
if kind:
if dist_metric:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_fulltest_dist{dist}.csv"))
df_lfr2 = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
else:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_fulltest.csv"))
df_lfr2 = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
else:
if dist_metric:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
else:
df_lfr = parity_process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
if kind:
if dist_metric:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_fulltest_dist{dist}.csv"))
df_DIR2 = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
else:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_fulltest.csv"))
df_DIR2 = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
else:
if dist_metric:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
else:
df_DIR = parity_process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
if kind:
if dist_metric:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_fulltest_dist{dist}.csv")
df_adver2 = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
else:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_fulltest.csv")
df_adver2 = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
else:
if dist_metric:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
else:
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm'] * 12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver['diff'] = df_adver['diff'] * 100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x * 100)
if kind:
df_adver2['Model-count'] = df_adver2['Dataperm'] * 12
df_adver2 = df_adver2.sort_values("Discm_percent").groupby("Model-count", as_index=False).first()
df_adver2['Test_acc'] = df_adver2['Test_acc'].apply(lambda x: x * 100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
if kind:
if dist_metric:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_fulltest_dist{dist}.csv")
df_nosensitive2 = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv")
else:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_fulltest.csv")
df_nosensitive2 = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
else:
if dist_metric:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv")
else:
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
# import ipdb; ipdb.set_trace()
if dist_metric:
df_nosensitive = df_nosensitive[['Model-count', 'Test_acc', 'Class0_Pos', 'Class1_Pos', 'Discm_percent']]
else:
df_nosensitive = df_nosensitive[['Model-count', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos']) * 100
if kind:
df_nosensitive2['Test_acc'] = df_nosensitive2['Test_acc'].apply(lambda x: x * 100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
if kind:
if dist_metric:
df_noremoval = parity_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_fulltest_dist{dist}.csv"))
df_noremoval2 = parity_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_dist{dist}.csv"))
else:
df_noremoval = parity_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_fulltest.csv"))
df_noremoval2 = parity_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
else:
if dist_metric:
df_noremoval = parity_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_dist{dist}.csv"))
else:
df_noremoval = parity_process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
df_main = | pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our], sort=True) | pandas.concat |